You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by gi...@apache.org on 2018/04/19 14:46:28 UTC

[01/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d6d46e7f0 -> ede30993e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
index 7dbc29c..5cd476c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -43,7 +43,7 @@
 <span class="sourceLineNo">035</span><a name="line.35"></a>
 <span class="sourceLineNo">036</span>  // If between two dots there is not a number, we regard it as a very large number so it is<a name="line.36"></a>
 <span class="sourceLineNo">037</span>  // higher than any numbers in the version.<a name="line.37"></a>
-<span class="sourceLineNo">038</span>  private static int VERY_LARGE_NUMBER = 100000;<a name="line.38"></a>
+<span class="sourceLineNo">038</span>  private static final int VERY_LARGE_NUMBER = 100000;<a name="line.38"></a>
 <span class="sourceLineNo">039</span><a name="line.39"></a>
 <span class="sourceLineNo">040</span>  /**<a name="line.40"></a>
 <span class="sourceLineNo">041</span>   * Get the hbase version.<a name="line.41"></a>
@@ -126,43 +126,58 @@
 <span class="sourceLineNo">118</span>      return 0;<a name="line.118"></a>
 <span class="sourceLineNo">119</span>    }<a name="line.119"></a>
 <span class="sourceLineNo">120</span><a name="line.120"></a>
-<span class="sourceLineNo">121</span>    String s1[] = v1.split("\\.|-");//1.2.3-hotfix -&gt; [1, 2, 3, hotfix]<a name="line.121"></a>
-<span class="sourceLineNo">122</span>    String s2[] = v2.split("\\.|-");<a name="line.122"></a>
+<span class="sourceLineNo">121</span>    Object[] v1Comps = getVersionComponents(v1); //1.2.3-hotfix -&gt; [1, 2, 3, hotfix]<a name="line.121"></a>
+<span class="sourceLineNo">122</span>    Object[] v2Comps = getVersionComponents(v2);<a name="line.122"></a>
 <span class="sourceLineNo">123</span>    int index = 0;<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    while (index &lt; s1.length &amp;&amp; index &lt; s2.length) {<a name="line.124"></a>
-<span class="sourceLineNo">125</span>      int va = VERY_LARGE_NUMBER, vb = VERY_LARGE_NUMBER;<a name="line.125"></a>
-<span class="sourceLineNo">126</span>      try {<a name="line.126"></a>
-<span class="sourceLineNo">127</span>        va = Integer.parseInt(s1[index]);<a name="line.127"></a>
-<span class="sourceLineNo">128</span>      } catch (Exception ingore) {<a name="line.128"></a>
-<span class="sourceLineNo">129</span>      }<a name="line.129"></a>
-<span class="sourceLineNo">130</span>      try {<a name="line.130"></a>
-<span class="sourceLineNo">131</span>        vb = Integer.parseInt(s2[index]);<a name="line.131"></a>
-<span class="sourceLineNo">132</span>      } catch (Exception ingore) {<a name="line.132"></a>
-<span class="sourceLineNo">133</span>      }<a name="line.133"></a>
-<span class="sourceLineNo">134</span>      if (va != vb) {<a name="line.134"></a>
-<span class="sourceLineNo">135</span>        return va - vb;<a name="line.135"></a>
-<span class="sourceLineNo">136</span>      }<a name="line.136"></a>
-<span class="sourceLineNo">137</span>      if (va == VERY_LARGE_NUMBER) {<a name="line.137"></a>
-<span class="sourceLineNo">138</span>        // compare as String<a name="line.138"></a>
-<span class="sourceLineNo">139</span>        int c = s1[index].compareTo(s2[index]);<a name="line.139"></a>
-<span class="sourceLineNo">140</span>        if (c != 0) {<a name="line.140"></a>
-<span class="sourceLineNo">141</span>          return c;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>        }<a name="line.142"></a>
-<span class="sourceLineNo">143</span>      }<a name="line.143"></a>
-<span class="sourceLineNo">144</span>      index++;<a name="line.144"></a>
-<span class="sourceLineNo">145</span>    }<a name="line.145"></a>
-<span class="sourceLineNo">146</span>    if (index &lt; s1.length) {<a name="line.146"></a>
-<span class="sourceLineNo">147</span>      // s1 is longer<a name="line.147"></a>
-<span class="sourceLineNo">148</span>      return 1;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>    }<a name="line.149"></a>
-<span class="sourceLineNo">150</span>    //s2 is longer<a name="line.150"></a>
-<span class="sourceLineNo">151</span>    return -1;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>  }<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>  public static void main(String[] args) {<a name="line.154"></a>
-<span class="sourceLineNo">155</span>    writeTo(System.out);<a name="line.155"></a>
-<span class="sourceLineNo">156</span>  }<a name="line.156"></a>
-<span class="sourceLineNo">157</span>}<a name="line.157"></a>
+<span class="sourceLineNo">124</span>    while (index &lt; v1Comps.length &amp;&amp; index &lt; v2Comps.length) {<a name="line.124"></a>
+<span class="sourceLineNo">125</span>      int va = v1Comps[index] instanceof Integer ? (Integer)v1Comps[index] : VERY_LARGE_NUMBER;<a name="line.125"></a>
+<span class="sourceLineNo">126</span>      int vb = v2Comps[index] instanceof Integer ? (Integer)v2Comps[index] : VERY_LARGE_NUMBER;<a name="line.126"></a>
+<span class="sourceLineNo">127</span><a name="line.127"></a>
+<span class="sourceLineNo">128</span>      if (va != vb) {<a name="line.128"></a>
+<span class="sourceLineNo">129</span>        return va - vb;<a name="line.129"></a>
+<span class="sourceLineNo">130</span>      }<a name="line.130"></a>
+<span class="sourceLineNo">131</span>      if (va == VERY_LARGE_NUMBER) {<a name="line.131"></a>
+<span class="sourceLineNo">132</span>        // here, va and vb components must be same and Strings, compare as String<a name="line.132"></a>
+<span class="sourceLineNo">133</span>        int c = ((String)v1Comps[index]).compareTo((String)v2Comps[index]);<a name="line.133"></a>
+<span class="sourceLineNo">134</span>        if (c != 0) {<a name="line.134"></a>
+<span class="sourceLineNo">135</span>          return c;<a name="line.135"></a>
+<span class="sourceLineNo">136</span>        }<a name="line.136"></a>
+<span class="sourceLineNo">137</span>      }<a name="line.137"></a>
+<span class="sourceLineNo">138</span>      index++;<a name="line.138"></a>
+<span class="sourceLineNo">139</span>    }<a name="line.139"></a>
+<span class="sourceLineNo">140</span>    if (index &lt; v1Comps.length) {<a name="line.140"></a>
+<span class="sourceLineNo">141</span>      // v1 is longer<a name="line.141"></a>
+<span class="sourceLineNo">142</span>      return 1;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    }<a name="line.143"></a>
+<span class="sourceLineNo">144</span>    //v2 is longer<a name="line.144"></a>
+<span class="sourceLineNo">145</span>    return -1;<a name="line.145"></a>
+<span class="sourceLineNo">146</span>  }<a name="line.146"></a>
+<span class="sourceLineNo">147</span><a name="line.147"></a>
+<span class="sourceLineNo">148</span>  /**<a name="line.148"></a>
+<span class="sourceLineNo">149</span>   * Returns the version components as Integer and String objects<a name="line.149"></a>
+<span class="sourceLineNo">150</span>   * Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"]<a name="line.150"></a>
+<span class="sourceLineNo">151</span>   * @return the components of the version string<a name="line.151"></a>
+<span class="sourceLineNo">152</span>   */<a name="line.152"></a>
+<span class="sourceLineNo">153</span>  static Object[] getVersionComponents(final String version) {<a name="line.153"></a>
+<span class="sourceLineNo">154</span>    assert(version != null);<a name="line.154"></a>
+<span class="sourceLineNo">155</span>    Object[] strComps = version.split("[\\.-]");<a name="line.155"></a>
+<span class="sourceLineNo">156</span>    assert(strComps.length &gt; 0);<a name="line.156"></a>
+<span class="sourceLineNo">157</span><a name="line.157"></a>
+<span class="sourceLineNo">158</span>    Object[] comps = new Object[strComps.length];<a name="line.158"></a>
+<span class="sourceLineNo">159</span>    for (int i = 0; i &lt; strComps.length; ++i) {<a name="line.159"></a>
+<span class="sourceLineNo">160</span>      try {<a name="line.160"></a>
+<span class="sourceLineNo">161</span>        comps[i] = Integer.parseInt((String) strComps[i]);<a name="line.161"></a>
+<span class="sourceLineNo">162</span>      } catch (NumberFormatException e) {<a name="line.162"></a>
+<span class="sourceLineNo">163</span>        comps[i] = strComps[i];<a name="line.163"></a>
+<span class="sourceLineNo">164</span>      }<a name="line.164"></a>
+<span class="sourceLineNo">165</span>    }<a name="line.165"></a>
+<span class="sourceLineNo">166</span>    return comps;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>  }<a name="line.167"></a>
+<span class="sourceLineNo">168</span><a name="line.168"></a>
+<span class="sourceLineNo">169</span>  public static void main(String[] args) {<a name="line.169"></a>
+<span class="sourceLineNo">170</span>    writeTo(System.out);<a name="line.170"></a>
+<span class="sourceLineNo">171</span>  }<a name="line.171"></a>
+<span class="sourceLineNo">172</span>}<a name="line.172"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/export_control.html
----------------------------------------------------------------------
diff --git a/export_control.html b/export_control.html
index 781beb0..e02ad4e 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Export Control
@@ -331,7 +331,7 @@ for more details.</p>
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/index.html
----------------------------------------------------------------------
diff --git a/index.html b/index.html
index 7d92879..79e4517 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Apache HBase™ Home</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -409,7 +409,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/integration.html
----------------------------------------------------------------------
diff --git a/integration.html b/integration.html
index 362db9f..8af4390 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; CI Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -291,7 +291,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/issue-tracking.html
----------------------------------------------------------------------
diff --git a/issue-tracking.html b/issue-tracking.html
index 3835d9f..98e3464 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Issue Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -288,7 +288,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/license.html
----------------------------------------------------------------------
diff --git a/license.html b/license.html
index c34ff87..afba063 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Licenses</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -491,7 +491,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/mail-lists.html
----------------------------------------------------------------------
diff --git a/mail-lists.html b/mail-lists.html
index 9b49ee8..3c4260d 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Mailing Lists</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -341,7 +341,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/metrics.html
----------------------------------------------------------------------
diff --git a/metrics.html b/metrics.html
index c988270..5a60f34 100644
--- a/metrics.html
+++ b/metrics.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Apache HBase (TM) Metrics
@@ -459,7 +459,7 @@ export HBASE_REGIONSERVER_OPTS=&quot;$HBASE_JMX_OPTS -Dcom.sun.management.jmxrem
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/old_news.html
----------------------------------------------------------------------
diff --git a/old_news.html b/old_news.html
index 9534cc5..70fd4a5 100644
--- a/old_news.html
+++ b/old_news.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Old Apache HBase (TM) News
@@ -440,7 +440,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/plugin-management.html
----------------------------------------------------------------------
diff --git a/plugin-management.html b/plugin-management.html
index 40d1ffa..61cb343 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Plugin Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -440,7 +440,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/plugins.html
----------------------------------------------------------------------
diff --git a/plugins.html b/plugins.html
index 6f54c9c..4789590 100644
--- a/plugins.html
+++ b/plugins.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Plugins</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -375,7 +375,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/poweredbyhbase.html
----------------------------------------------------------------------
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index 014085f..9782cd8 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Powered By Apache HBase™</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -769,7 +769,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/project-info.html
----------------------------------------------------------------------
diff --git a/project-info.html b/project-info.html
index 75b7aab..009aab9 100644
--- a/project-info.html
+++ b/project-info.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Information</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -335,7 +335,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/project-reports.html
----------------------------------------------------------------------
diff --git a/project-reports.html b/project-reports.html
index 1828dc0..19e4b8e 100644
--- a/project-reports.html
+++ b/project-reports.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Generated Reports</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -305,7 +305,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/project-summary.html
----------------------------------------------------------------------
diff --git a/project-summary.html b/project-summary.html
index 10b45c1..0ff8532 100644
--- a/project-summary.html
+++ b/project-summary.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Summary</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -331,7 +331,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/pseudo-distributed.html
----------------------------------------------------------------------
diff --git a/pseudo-distributed.html b/pseudo-distributed.html
index ea86cc9..3c1456e 100644
--- a/pseudo-distributed.html
+++ b/pseudo-distributed.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
 Running Apache HBase (TM) in pseudo-distributed mode
@@ -308,7 +308,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/replication.html
----------------------------------------------------------------------
diff --git a/replication.html b/replication.html
index 4892191..fc2e377 100644
--- a/replication.html
+++ b/replication.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Apache HBase (TM) Replication
@@ -303,7 +303,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/resources.html
----------------------------------------------------------------------
diff --git a/resources.html b/resources.html
index 557c031..47ac2b9 100644
--- a/resources.html
+++ b/resources.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Other Apache HBase (TM) Resources</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -331,7 +331,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/source-repository.html
----------------------------------------------------------------------
diff --git a/source-repository.html b/source-repository.html
index ff643e6..6309f26 100644
--- a/source-repository.html
+++ b/source-repository.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Source Code Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -299,7 +299,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/sponsors.html
----------------------------------------------------------------------
diff --git a/sponsors.html b/sponsors.html
index 1ba4123..b4b70ec 100644
--- a/sponsors.html
+++ b/sponsors.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Apache HBase™ Sponsors</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -333,7 +333,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/supportingprojects.html
----------------------------------------------------------------------
diff --git a/supportingprojects.html b/supportingprojects.html
index 54455f5..7c8af8e 100644
--- a/supportingprojects.html
+++ b/supportingprojects.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Supporting Projects</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -520,7 +520,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/team-list.html
----------------------------------------------------------------------
diff --git a/team-list.html b/team-list.html
index 2f6fbe5..830fab3 100644
--- a/team-list.html
+++ b/team-list.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Team</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -730,7 +730,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.MyStoreScanner.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.MyStoreScanner.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.MyStoreScanner.html
index 8d6e61d..073b06f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.MyStoreScanner.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.MyStoreScanner.html
@@ -206,7 +206,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.StoreScanner</h3>
-<code>checkFlushed, checkScanOrder, close, getEstimatedNumberOfKvsScanned, getNextIndexedKey, getReadPoint, getScannerOrder, next, next, peek, reopenAfterFlush, reseek, resetKVHeap, seek, seekAsDirection, seekScanners, seekToNextRow, shipped, trySkipToNextColumn, trySkipToNextRow, updateReaders</code></li>
+<code>checkFlushed, checkScanOrder, close, getEstimatedNumberOfKvsScanned, getNextIndexedKey, getReadPoint, next, next, peek, reopenAfterFlush, reseek, resetKVHeap, seek, seekAsDirection, seekScanners, seekToNextRow, shipped, trySkipToNextColumn, trySkipToNextRow, updateReaders</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -234,7 +234,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>backwardSeek, enforceSeek, getFilePath, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
+<code>backwardSeek, enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.html
index f213dfa..33ea213 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.html
@@ -195,7 +195,7 @@ extends org.apache.hadoop.hbase.util.CollectionBackedScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.util.CollectionBackedScanner</h3>
-<code>close, getScannerOrder, next, peek, reseek, seek</code></li>
+<code>close, next, peek, reseek, seek</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -218,6 +218,13 @@ extends org.apache.hadoop.hbase.util.CollectionBackedScanner</pre>
 <h3>Methods inherited from class&nbsp;java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></h3>
 <code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--" title="class or interface in java.lang">clone</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-" title="class or interface in java.lang">equals</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--" title="class or interface in java.lang">finalize</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--" title="class or interface in java.lang">getClass</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--" title="class or interface in java.lang">hashCode</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--" title="class or interface in java.lang">notify</a>, <a href="https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--" title="class or interface in java.lang">wait</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-" title="class or interface in java.lang">wait</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-" title="class or interface in java.lang">wait</a></code></li>
 </ul>
+<ul class="blockList">
+<li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.KeyValueScanner">
+<!--   -->
+</a>
+<h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
+<code>getScannerOrder</code></li>
+</ul>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.TestScanner.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.TestScanner.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.TestScanner.html
index 24d7470..09ab386 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.TestScanner.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.TestScanner.html
@@ -322,12 +322,6 @@ extends org.apache.hadoop.hbase.util.CollectionBackedScanner</pre>
 <li class="blockList">
 <h4>getScannerOrder</h4>
 <pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.TestScanner.html#line.229">getScannerOrder</a>()</pre>
-<dl>
-<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
-<dd><code>getScannerOrder</code>&nbsp;in interface&nbsp;<code>org.apache.hadoop.hbase.regionserver.KeyValueScanner</code></dd>
-<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
-<dd><code>getScannerOrder</code>&nbsp;in class&nbsp;<code>org.apache.hadoop.hbase.util.CollectionBackedScanner</code></dd>
-</dl>
 </li>
 </ul>
 <a name="close--">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
index adf810b..82a3353 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
@@ -224,7 +224,7 @@ extends org.apache.hadoop.hbase.regionserver.KeyValueHeap</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueHeap</h3>
-<code>close, getCurrentForTesting, getHeap, getNextIndexedKey, getScannerOrder, next, peek, pollRealKV, requestSeek, reseek, seek, shipped</code></li>
+<code>close, getCurrentForTesting, getHeap, getNextIndexedKey, next, peek, pollRealKV, requestSeek, reseek, seek, shipped</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -252,7 +252,7 @@ extends org.apache.hadoop.hbase.regionserver.KeyValueHeap</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>backwardSeek, enforceSeek, getFilePath, isFileScanner, realSeekDone, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
+<code>backwardSeek, enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatReversedKVHeap.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatReversedKVHeap.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatReversedKVHeap.html
index e9b64a2..d71558d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatReversedKVHeap.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatReversedKVHeap.html
@@ -232,7 +232,7 @@ extends org.apache.hadoop.hbase.regionserver.ReversedKeyValueHeap</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueHeap</h3>
-<code>close, getCurrentForTesting, getHeap, getNextIndexedKey, getScannerOrder, peek, pollRealKV, shipped</code></li>
+<code>close, getCurrentForTesting, getHeap, getNextIndexedKey, peek, pollRealKV, shipped</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner">
@@ -253,7 +253,7 @@ extends org.apache.hadoop.hbase.regionserver.ReversedKeyValueHeap</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>enforceSeek, getFilePath, isFileScanner, realSeekDone, shouldUseScanner</code></li>
+<code>enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellGridStoreScanner.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellGridStoreScanner.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellGridStoreScanner.html
index 2a7c0b0..533c1b2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellGridStoreScanner.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellGridStoreScanner.html
@@ -232,7 +232,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.StoreScanner</h3>
-<code>checkFlushed, checkScanOrder, close, enableLazySeekGlobally, getAllScannersForTesting, getEstimatedNumberOfKvsScanned, getReadPoint, getScannerOrder, isScanUsePread, next, next, peek, reopenAfterFlush, reseek, seek, seekAsDirection, seekScanners, seekToNextRow, selectScannersFrom, shipped, trySwitchToStreamRead, updateReaders</code></li>
+<code>checkFlushed, checkScanOrder, close, enableLazySeekGlobally, getAllScannersForTesting, getEstimatedNumberOfKvsScanned, getReadPoint, isScanUsePread, next, next, peek, reopenAfterFlush, reseek, seek, seekAsDirection, seekScanners, seekToNextRow, selectScannersFrom, shipped, trySwitchToStreamRead, updateReaders</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -260,7 +260,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>backwardSeek, enforceSeek, getFilePath, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
+<code>backwardSeek, enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsNoOptimizeStoreScanner.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsNoOptimizeStoreScanner.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsNoOptimizeStoreScanner.html
index 73a20a6..36e891d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsNoOptimizeStoreScanner.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsNoOptimizeStoreScanner.html
@@ -217,7 +217,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.StoreScanner</h3>
-<code>checkFlushed, checkScanOrder, close, enableLazySeekGlobally, getAllScannersForTesting, getEstimatedNumberOfKvsScanned, getReadPoint, getScannerOrder, isScanUsePread, next, next, peek, reopenAfterFlush, reseek, resetKVHeap, seek, seekAsDirection, seekScanners, seekToNextRow, selectScannersFrom, shipped, trySkipToNextRow, trySwitchToStreamRead, updateReaders</code></li>
+<code>checkFlushed, checkScanOrder, close, enableLazySeekGlobally, getAllScannersForTesting, getEstimatedNumberOfKvsScanned, getReadPoint, isScanUsePread, next, next, peek, reopenAfterFlush, reseek, resetKVHeap, seek, seekAsDirection, seekScanners, seekToNextRow, selectScannersFrom, shipped, trySkipToNextRow, trySwitchToStreamRead, updateReaders</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -245,7 +245,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>backwardSeek, enforceSeek, getFilePath, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
+<code>backwardSeek, enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsStoreScanner.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsStoreScanner.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsStoreScanner.html
index 85e1e35..2f76db7 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsStoreScanner.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.CellWithVersionsStoreScanner.html
@@ -217,7 +217,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.StoreScanner</h3>
-<code>checkFlushed, checkScanOrder, close, enableLazySeekGlobally, getAllScannersForTesting, getEstimatedNumberOfKvsScanned, getReadPoint, getScannerOrder, isScanUsePread, next, next, peek, reopenAfterFlush, reseek, resetKVHeap, seek, seekAsDirection, seekScanners, seekToNextRow, selectScannersFrom, shipped, trySkipToNextRow, trySwitchToStreamRead, updateReaders</code></li>
+<code>checkFlushed, checkScanOrder, close, enableLazySeekGlobally, getAllScannersForTesting, getEstimatedNumberOfKvsScanned, getReadPoint, isScanUsePread, next, next, peek, reopenAfterFlush, reseek, resetKVHeap, seek, seekAsDirection, seekScanners, seekToNextRow, selectScannersFrom, shipped, trySkipToNextRow, trySwitchToStreamRead, updateReaders</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -245,7 +245,7 @@ extends org.apache.hadoop.hbase.regionserver.StoreScanner</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>backwardSeek, enforceSeek, getFilePath, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
+<code>backwardSeek, enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, requestSeek, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.KeyValueHeapWithCount.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.KeyValueHeapWithCount.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.KeyValueHeapWithCount.html
index 1803ef6..0cbc3ff 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.KeyValueHeapWithCount.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStoreScanner.KeyValueHeapWithCount.html
@@ -229,7 +229,7 @@ extends org.apache.hadoop.hbase.regionserver.KeyValueHeap</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueHeap</h3>
-<code>close, getCurrentForTesting, getHeap, getNextIndexedKey, getScannerOrder, next, next, pollRealKV, requestSeek, reseek, seek, shipped</code></li>
+<code>close, getCurrentForTesting, getHeap, getNextIndexedKey, next, next, pollRealKV, requestSeek, reseek, seek, shipped</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -257,7 +257,7 @@ extends org.apache.hadoop.hbase.regionserver.KeyValueHeap</pre>
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.KeyValueScanner</h3>
-<code>backwardSeek, enforceSeek, getFilePath, isFileScanner, realSeekDone, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
+<code>backwardSeek, enforceSeek, getFilePath, getScannerOrder, isFileScanner, realSeekDone, seekToLastRow, seekToPreviousRow, shouldUseScanner</code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">


[39/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/checkstyle.rss
----------------------------------------------------------------------
diff --git a/checkstyle.rss b/checkstyle.rss
index f6ec33a..2da7c60 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
     <copyright>&#169;2007 - 2018 The Apache Software Foundation</copyright>
     <item>
       <title>File: 3604,
-             Errors: 15865,
+             Errors: 15864,
              Warnings: 0,
              Infos: 0
       </title>
@@ -9141,7 +9141,7 @@ under the License.
                   0
                 </td>
                 <td>
-                  3
+                  1
                 </td>
               </tr>
                           <tr>
@@ -13593,7 +13593,7 @@ under the License.
                   0
                 </td>
                 <td>
-                  3
+                  2
                 </td>
               </tr>
                           <tr>
@@ -49069,7 +49069,7 @@ under the License.
                   0
                 </td>
                 <td>
-                  97
+                  99
                 </td>
               </tr>
                           <tr>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/coc.html
----------------------------------------------------------------------
diff --git a/coc.html b/coc.html
index fe59c86..c11a645 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Code of Conduct Policy
@@ -375,7 +375,7 @@ email to <a class="externalLink" href="mailto:private@hbase.apache.org">the priv
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/dependencies.html
----------------------------------------------------------------------
diff --git a/dependencies.html b/dependencies.html
index c0468f5..917c2c5 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Dependencies</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -440,7 +440,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/dependency-convergence.html
----------------------------------------------------------------------
diff --git a/dependency-convergence.html b/dependency-convergence.html
index cbaeedd..4af4e18 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Reactor Dependency Convergence</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -1105,7 +1105,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/dependency-info.html
----------------------------------------------------------------------
diff --git a/dependency-info.html b/dependency-info.html
index 07601ac..0c1d73a 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Dependency Information</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -313,7 +313,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/dependency-management.html
----------------------------------------------------------------------
diff --git a/dependency-management.html b/dependency-management.html
index 774e44c..7806822 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Dependency Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -969,7 +969,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/constant-values.html
----------------------------------------------------------------------
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index 929f969..d4d265b 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3768,21 +3768,21 @@
 <!--   -->
 </a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/Version.html#date">date</a></code></td>
-<td class="colLast"><code>"Wed Apr 18 14:38:53 UTC 2018"</code></td>
+<td class="colLast"><code>"Thu Apr 19 14:39:00 UTC 2018"</code></td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.Version.revision">
 <!--   -->
 </a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/Version.html#revision">revision</a></code></td>
-<td class="colLast"><code>"f4f2b68238a094d7b1931dc8b7939742ccbb2b57"</code></td>
+<td class="colLast"><code>"556b22374423ff087c0583d02ae4298d4d4f2e6b"</code></td>
 </tr>
 <tr class="altColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.Version.srcChecksum">
 <!--   -->
 </a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/Version.html#srcChecksum">srcChecksum</a></code></td>
-<td class="colLast"><code>"140c55771a388fd58f0c3c7100fa35b2"</code></td>
+<td class="colLast"><code>"83ef0b63e39df660933d8e09ab06a005"</code></td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.Version.url">
@@ -15163,27 +15163,6 @@
 <ul class="blockList">
 <li class="blockList">
 <table class="constantsSummary" border="0" cellpadding="3" cellspacing="0" summary="Constant Field Values table, listing constant fields, and values">
-<caption><span>org.apache.hadoop.hbase.master.normalizer.<a href="org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html" title="class in org.apache.hadoop.hbase.master.normalizer">SimpleRegionNormalizer</a></span><span class="tabEnd">&nbsp;</span></caption>
-<tr>
-<th class="colFirst" scope="col">Modifier and Type</th>
-<th scope="col">Constant Field</th>
-<th class="colLast" scope="col">Value</th>
-</tr>
-<tbody>
-<tr class="altColor">
-<td class="colFirst"><a name="org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MIN_REGION_COUNT">
-<!--   -->
-</a><code>private&nbsp;static&nbsp;final&nbsp;int</code></td>
-<td><code><a href="org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#MIN_REGION_COUNT">MIN_REGION_COUNT</a></code></td>
-<td class="colLast"><code>3</code></td>
-</tr>
-</tbody>
-</table>
-</li>
-</ul>
-<ul class="blockList">
-<li class="blockList">
-<table class="constantsSummary" border="0" cellpadding="3" cellspacing="0" summary="Constant Field Values table, listing constant fields, and values">
 <caption><span>org.apache.hadoop.hbase.master.procedure.<a href="org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.html" title="class in org.apache.hadoop.hbase.master.procedure">MasterProcedureConstants</a></span><span class="tabEnd">&nbsp;</span></caption>
 <tr>
 <th class="colFirst" scope="col">Modifier and Type</th>
@@ -21590,25 +21569,6 @@
 </li>
 <li class="blockList">
 <table class="constantsSummary" border="0" cellpadding="3" cellspacing="0" summary="Constant Field Values table, listing constant fields, and values">
-<caption><span>org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html" title="class in org.apache.hadoop.hbase.regionserver">SegmentScanner</a></span><span class="tabEnd">&nbsp;</span></caption>
-<tr>
-<th class="colFirst" scope="col">Modifier and Type</th>
-<th scope="col">Constant Field</th>
-<th class="colLast" scope="col">Value</th>
-</tr>
-<tbody>
-<tr class="altColor">
-<td class="colFirst"><a name="org.apache.hadoop.hbase.regionserver.SegmentScanner.DEFAULT_SCANNER_ORDER">
-<!--   -->
-</a><code>private&nbsp;static&nbsp;final&nbsp;long</code></td>
-<td><code><a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html#DEFAULT_SCANNER_ORDER">DEFAULT_SCANNER_ORDER</a></code></td>
-<td class="colLast"><code>9223372036854775807L</code></td>
-</tr>
-</tbody>
-</table>
-</li>
-<li class="blockList">
-<table class="constantsSummary" border="0" cellpadding="3" cellspacing="0" summary="Constant Field Values table, listing constant fields, and values">
 <caption><span>org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SequenceId.html" title="interface in org.apache.hadoop.hbase.regionserver">SequenceId</a></span><span class="tabEnd">&nbsp;</span></caption>
 <tr>
 <th class="colFirst" scope="col">Modifier and Type</th>
@@ -28247,6 +28207,25 @@
 </tbody>
 </table>
 </li>
+<li class="blockList">
+<table class="constantsSummary" border="0" cellpadding="3" cellspacing="0" summary="Constant Field Values table, listing constant fields, and values">
+<caption><span>org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/VersionInfo.html" title="class in org.apache.hadoop.hbase.util">VersionInfo</a></span><span class="tabEnd">&nbsp;</span></caption>
+<tr>
+<th class="colFirst" scope="col">Modifier and Type</th>
+<th scope="col">Constant Field</th>
+<th class="colLast" scope="col">Value</th>
+</tr>
+<tbody>
+<tr class="altColor">
+<td class="colFirst"><a name="org.apache.hadoop.hbase.util.VersionInfo.VERY_LARGE_NUMBER">
+<!--   -->
+</a><code>private&nbsp;static&nbsp;final&nbsp;int</code></td>
+<td><code><a href="org/apache/hadoop/hbase/util/VersionInfo.html#VERY_LARGE_NUMBER">VERY_LARGE_NUMBER</a></code></td>
+<td class="colLast"><code>100000</code></td>
+</tr>
+</tbody>
+</table>
+</li>
 </ul>
 <ul class="blockList">
 <li class="blockList">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/index-all.html
----------------------------------------------------------------------
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index b6338fb..c2c0c99 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -2556,9 +2556,9 @@
 </dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html#addToRunQueue-org.apache.hadoop.hbase.master.procedure.FairQueue-org.apache.hadoop.hbase.master.procedure.Queue-">addToRunQueue(FairQueue&lt;T&gt;, Queue&lt;T&gt;)</a></span> - Static method in class org.apache.hadoop.hbase.master.procedure.<a href="org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html" title="class in org.apache.hadoop.hbase.master.procedure">MasterProcedureScheduler</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-long-java.util.List-">addToScanners(List&lt;? extends Segment&gt;, long, long, List&lt;KeyValueScanner&gt;)</a></span> - Static method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">AbstractMemStore</a></dt>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-java.util.List-">addToScanners(List&lt;? extends Segment&gt;, long, List&lt;KeyValueScanner&gt;)</a></span> - Static method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">AbstractMemStore</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">addToScanners(Segment, long, long, List&lt;KeyValueScanner&gt;)</a></span> - Static method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">AbstractMemStore</a></dt>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">addToScanners(Segment, long, List&lt;KeyValueScanner&gt;)</a></span> - Static method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/AbstractMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">AbstractMemStore</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.html#addToSize-long-">addToSize(long)</a></span> - Method in class org.apache.hadoop.hbase.procedure2.store.wal.<a href="org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.html" title="class in org.apache.hadoop.hbase.procedure2.store.wal">ProcedureWALFile</a></dt>
 <dd>
@@ -23291,8 +23291,6 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html#DEFAULT_RS_RPC_STARTUP_WAIT_TIME">DEFAULT_RS_RPC_STARTUP_WAIT_TIME</a></span> - Static variable in class org.apache.hadoop.hbase.master.procedure.<a href="org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html" title="class in org.apache.hadoop.hbase.master.procedure">RSProcedureDispatcher</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html#DEFAULT_SCANNER_ORDER">DEFAULT_SCANNER_ORDER</a></span> - Static variable in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html" title="class in org.apache.hadoop.hbase.regionserver">SegmentScanner</a></dt>
-<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html#DEFAULT_SCOPE">DEFAULT_SCOPE</a></span> - Static variable in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html" title="class in org.apache.hadoop.hbase.regionserver">ScannerContext.LimitFields</a></dt>
 <dd>
 <div class="block">Default scope that is assigned to a limit if a scope is not specified.</div>
@@ -47839,10 +47837,6 @@
 <dd>
 <div class="block">Creates the scanner for the given read point</div>
 </dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanner-long-long-">getScanner(long, long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">CompositeImmutableSegment</a></dt>
-<dd>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/HRegion.html#getScanner-org.apache.hadoop.hbase.client.Scan-">getScanner(Scan)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/HRegion.html" title="class in org.apache.hadoop.hbase.regionserver">HRegion</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/HRegion.html#getScanner-org.apache.hadoop.hbase.client.Scan-java.util.List-">getScanner(Scan, List&lt;KeyValueScanner&gt;)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/HRegion.html" title="class in org.apache.hadoop.hbase.regionserver">HRegion</a></dt>
@@ -47869,10 +47863,6 @@
 <dd>
 <div class="block">Creates the scanner for the given read point</div>
 </dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner(long, long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></dt>
-<dd>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/StoreFileReader.html#getScanner-boolean-boolean-">getScanner(boolean, boolean)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/StoreFileReader.html" title="class in org.apache.hadoop.hbase.regionserver">StoreFileReader</a></dt>
 <dd>
 <div class="block"><span class="deprecatedLabel">Deprecated.</span>
@@ -47919,20 +47909,12 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/client/ConnectionConfiguration.html#getScannerMaxResultSize--">getScannerMaxResultSize()</a></span> - Method in class org.apache.hadoop.hbase.client.<a href="org/apache/hadoop/hbase/client/ConnectionConfiguration.html" title="class in org.apache.hadoop.hbase.client">ConnectionConfiguration</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getScannerOrder--">getScannerOrder()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/KeyValueHeap.html" title="class in org.apache.hadoop.hbase.regionserver">KeyValueHeap</a></dt>
-<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder()</a></span> - Method in interface org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></dt>
 <dd>
 <div class="block">Get the order of this KeyValueScanner.</div>
 </dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html#getScannerOrder--">getScannerOrder()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html" title="class in org.apache.hadoop.hbase.regionserver">SegmentScanner</a></dt>
-<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/StoreFileScanner.html#getScannerOrder--">getScannerOrder()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/StoreFileScanner.html" title="class in org.apache.hadoop.hbase.regionserver">StoreFileScanner</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/StoreScanner.html#getScannerOrder--">getScannerOrder()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/StoreScanner.html" title="class in org.apache.hadoop.hbase.regionserver">StoreScanner</a></dt>
-<dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/CollectionBackedScanner.html#getScannerOrder--">getScannerOrder()</a></span> - Method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/CollectionBackedScanner.html" title="class in org.apache.hadoop.hbase.util">CollectionBackedScanner</a></dt>
-<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/rest/TableResource.html#getScannerResource--">getScannerResource()</a></span> - Method in class org.apache.hadoop.hbase.rest.<a href="org/apache/hadoop/hbase/rest/TableResource.html" title="class in org.apache.hadoop.hbase.rest">TableResource</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html#getScannerResults-java.nio.ByteBuffer-org.apache.hadoop.hbase.thrift2.generated.TScan-int-">getScannerResults(ByteBuffer, TScan, int)</a></span> - Method in class org.apache.hadoop.hbase.thrift2.<a href="org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html" title="class in org.apache.hadoop.hbase.thrift2">ThriftHBaseServiceHandler</a></dt>
@@ -47941,7 +47923,7 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#getScanners-long-">getScanners(long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/CompactingMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">CompactingMemStore</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanners-long-long-">getScanners(long, long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">CompositeImmutableSegment</a></dt>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanners-long-">getScanners(long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">CompositeImmutableSegment</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#getScanners-long-">getScanners(long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/DefaultMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">DefaultMemStore</a></dt>
 <dd>&nbsp;</dd>
@@ -47967,7 +47949,7 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html#getScanners--">getScanners()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSnapshot</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners(long, long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></dt>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners(long)</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/RSRpcServices.html#getScannersCount--">getScannersCount()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/RSRpcServices.html" title="class in org.apache.hadoop.hbase.regionserver">RSRpcServices</a></dt>
 <dd>&nbsp;</dd>
@@ -52693,6 +52675,11 @@
 <div class="block">Returns the version components
  Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"]</div>
 </dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/VersionInfo.html#getVersionComponents-java.lang.String-">getVersionComponents(String)</a></span> - Static method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/VersionInfo.html" title="class in org.apache.hadoop.hbase.util">VersionInfo</a></dt>
+<dd>
+<div class="block">Returns the version components as Integer and String objects
+ Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"]</div>
+</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/ServerName.html#getVersionedBytes--">getVersionedBytes()</a></span> - Method in class org.apache.hadoop.hbase.<a href="org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/CompactionPipeline.html#getVersionedList--">getVersionedList()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/CompactionPipeline.html" title="class in org.apache.hadoop.hbase.regionserver">CompactionPipeline</a></dt>
@@ -62517,6 +62504,8 @@
 <div class="block">Indicates to the client whether this task is monitoring a currently active 
  RPC call to a database command.</div>
 </dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#isOptionsSupported-java.lang.String:A-">isOptionsSupported(String[])</a></span> - Method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/types/DataType.html#isOrderPreserving--">isOrderPreserving()</a></span> - Method in interface org.apache.hadoop.hbase.types.<a href="org/apache/hadoop/hbase/types/DataType.html" title="interface in org.apache.hadoop.hbase.types">DataType</a></dt>
 <dd>
 <div class="block">Indicates whether this instance writes encoded <code>byte[]</code>'s
@@ -73206,8 +73195,6 @@
 </dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/constraint/Constraints.html#MIN_PRIORITY">MIN_PRIORITY</a></span> - Static variable in class org.apache.hadoop.hbase.constraint.<a href="org/apache/hadoop/hbase/constraint/Constraints.html" title="class in org.apache.hadoop.hbase.constraint">Constraints</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#MIN_REGION_COUNT">MIN_REGION_COUNT</a></span> - Static variable in class org.apache.hadoop.hbase.master.normalizer.<a href="org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html" title="class in org.apache.hadoop.hbase.master.normalizer">SimpleRegionNormalizer</a></dt>
-<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html#MIN_SERVER_BALANCE">MIN_SERVER_BALANCE</a></span> - Static variable in class org.apache.hadoop.hbase.master.balancer.<a href="org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html" title="class in org.apache.hadoop.hbase.master.balancer">BaseLoadBalancer</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html#MIN_STEP_KEY">MIN_STEP_KEY</a></span> - Static variable in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.html" title="class in org.apache.hadoop.hbase.regionserver">DefaultHeapMemoryTuner</a></dt>
@@ -73354,6 +73341,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.WalProcedureMap.html#minProcId">minProcId</a></span> - Variable in class org.apache.hadoop.hbase.procedure2.store.wal.<a href="org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.WalProcedureMap.html" title="class in org.apache.hadoop.hbase.procedure2.store.wal">ProcedureWALFormatReader.WalProcedureMap</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#minRegionCount">minRegionCount</a></span> - Variable in class org.apache.hadoop.hbase.master.normalizer.<a href="org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html" title="class in org.apache.hadoop.hbase.master.normalizer">SimpleRegionNormalizer</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/AssignmentVerificationReport.html#minRegionsOnRS">minRegionsOnRS</a></span> - Variable in class org.apache.hadoop.hbase.master.<a href="org/apache/hadoop/hbase/master/AssignmentVerificationReport.html" title="class in org.apache.hadoop.hbase.master">AssignmentVerificationReport</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html#minSeqIdToKeep">minSeqIdToKeep</a></span> - Variable in class org.apache.hadoop.hbase.regionserver.compactions.<a href="org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html" title="class in org.apache.hadoop.hbase.regionserver.compactions">Compactor.FileDetails</a></dt>
@@ -96274,10 +96263,6 @@ service.</div>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html#scannerOpenWithStopTs-java.nio.ByteBuffer-java.nio.ByteBuffer-java.nio.ByteBuffer-java.util.List-long-java.util.Map-">scannerOpenWithStopTs(ByteBuffer, ByteBuffer, ByteBuffer, List&lt;ByteBuffer&gt;, long, Map&lt;ByteBuffer, ByteBuffer&gt;)</a></span> - Method in class org.apache.hadoop.hbase.thrift.<a href="org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html" title="class in org.apache.hadoop.hbase.thrift">ThriftServerRunner.HBaseHandler</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html#scannerOrder">scannerOrder</a></span> - Variable in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html" title="class in org.apache.hadoop.hbase.regionserver">SegmentScanner</a></dt>
-<dd>
-<div class="block">Order of this scanner relative to other scanners.</div>
-</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/StoreFileScanner.html#scannerOrder">scannerOrder</a></span> - Variable in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/StoreFileScanner.html" title="class in org.apache.hadoop.hbase.regionserver">StoreFileScanner</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/security/access/AccessController.html#scannerOwners">scannerOwners</a></span> - Variable in class org.apache.hadoop.hbase.security.access.<a href="org/apache/hadoop/hbase/security/access/AccessController.html" title="class in org.apache.hadoop.hbase.security.access">AccessController</a></dt>
@@ -96898,9 +96883,9 @@ service.</div>
 <div class="block">A scanner of a single memstore segment.</div>
 </dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html#SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-">SegmentScanner(Segment, long)</a></span> - Constructor for class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html" title="class in org.apache.hadoop.hbase.regionserver">SegmentScanner</a></dt>
-<dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html#SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-long-">SegmentScanner(Segment, long, long)</a></span> - Constructor for class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/SegmentScanner.html" title="class in org.apache.hadoop.hbase.regionserver">SegmentScanner</a></dt>
-<dd>&nbsp;</dd>
+<dd>
+<div class="block">Scanners are ordered from 0 (oldest) to newest in increasing order.</div>
+</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/Segment.html#segmentSize">segmentSize</a></span> - Variable in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.html#segregateRegionsAndAssignRegionsWithFavoredNodes-java.util.List-java.util.List-">segregateRegionsAndAssignRegionsWithFavoredNodes(List&lt;RegionInfo&gt;, List&lt;ServerName&gt;)</a></span> - Method in class org.apache.hadoop.hbase.favored.<a href="org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.html" title="class in org.apache.hadoop.hbase.favored">FavoredNodeLoadBalancer</a></dt>
@@ -114828,6 +114813,8 @@ service.</div>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.html#UnsupportedCryptoException-java.lang.String-java.lang.Throwable-">UnsupportedCryptoException(String, Throwable)</a></span> - Constructor for exception org.apache.hadoop.hbase.ipc.<a href="org/apache/hadoop/hbase/ipc/UnsupportedCryptoException.html" title="class in org.apache.hadoop.hbase.ipc">UnsupportedCryptoException</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#unsupportedOptionsInV2">unsupportedOptionsInV2</a></span> - Static variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html#unthrottleNamespace-java.lang.String-">unthrottleNamespace(String)</a></span> - Static method in class org.apache.hadoop.hbase.quotas.<a href="org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html" title="class in org.apache.hadoop.hbase.quotas">QuotaSettingsFactory</a></dt>
 <dd>
 <div class="block">Remove the throttling for the specified namespace.</div>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index f2790ae..249def3 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -167,10 +167,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupInfo.BackupPhase.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupInfo.BackupPhase</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupType.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupInfo.BackupState.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupInfo.BackupState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupRestoreConstants.BackupCommand</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupInfo.BackupPhase.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupInfo.BackupPhase</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupType.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 39ca705..ad8d01b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -550,24 +550,24 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AbstractResponse.ResponseType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AbstractResponse.ResponseType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">MobCompactPartitionPolicy</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Scan.ReadType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Scan.ReadType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/RegionLocateType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">RegionLocateType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">MobCompactPartitionPolicy</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncProcessTask.SubmittedRows.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncProcessTask.SubmittedRows</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactionState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactionState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/IsolationLevel.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">IsolationLevel</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/ScannerCallable.MoreResults.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">ScannerCallable.MoreResults</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/SnapshotType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">SnapshotType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Durability.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Durability</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactionState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactionState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/TableState.State.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">TableState.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/RequestController.ReturnCode.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">RequestController.ReturnCode</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncRequestFutureImpl.Retry</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/ScannerCallable.MoreResults.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">ScannerCallable.MoreResults</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/SnapshotType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">SnapshotType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AbstractResponse.ResponseType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AbstractResponse.ResponseType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Consistency.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Consistency</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncRequestFutureImpl.Retry</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Scan.ReadType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Scan.ReadType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/MasterSwitchType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">MasterSwitchType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/TableState.State.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">TableState.State</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 1ee8f20..ff7405b 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -183,14 +183,14 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/BitComparator.BitwiseOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">BitComparator.BitwiseOp</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.SatisfiesCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.SatisfiesCode</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.Order</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">CompareFilter.CompareOp</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterList.Operator.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterList.Operator</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterWrapper.FilterRowRetCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterWrapper.FilterRowRetCode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/Filter.ReturnCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">Filter.ReturnCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">CompareFilter.CompareOp</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.SatisfiesCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.SatisfiesCode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">RegexStringComparator.EngineType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterWrapper.FilterRowRetCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterWrapper.FilterRowRetCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterList.Operator.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterList.Operator</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.Order</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/BitComparator.BitwiseOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">BitComparator.BitwiseOp</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 90b418b..54d2030 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -273,12 +273,12 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">CacheConfig.ExternalBlockCaches</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">HFileBlock.Writer.State</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockPriority.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockPriority</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockType.BlockCategory.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockType.BlockCategory</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">CacheConfig.ExternalBlockCaches</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockPriority.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockPriority</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/Cacheable.MemoryType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">Cacheable.MemoryType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockType.BlockCategory.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockType.BlockCategory</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index 1f90dde..7600159 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -349,8 +349,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/CallEvent.Type.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">CallEvent.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.SourceStorage.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">MetricsHBaseServerSourceFactoryImpl.SourceStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/CallEvent.Type.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">CallEvent.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.BufferCallAction.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">BufferCallBeforeInitHandler.BufferCallAction</span></a></li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
index dfed8e7..a07a1d8 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
@@ -293,10 +293,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.Counters.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">RowCounter.RowCounterMapper.Counters</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/TableSplit.Version.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">TableSplit.Version</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">CellCounter.CellCounterMapper.Counters</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/TableSplit.Version.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">TableSplit.Version</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.Counter.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">SyncTable.SyncMapper.Counter</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.Counters.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">RowCounter.RowCounterMapper.Counters</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html b/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
index 906a0f1..e0b4007 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.96">SimpleRegionNormalizer.PlanComparator</a>
+<pre>static class <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.100">SimpleRegionNormalizer.PlanComparator</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true" title="class or interface in java.util">Comparator</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&gt;</pre>
 <div class="block">Comparator class that gives higher priority to region Split plan.</div>
@@ -196,7 +196,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Comparat
 <ul class="blockListLast">
 <li class="blockList">
 <h4>PlanComparator</h4>
-<pre><a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html#line.96">PlanComparator</a>()</pre>
+<pre><a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html#line.100">PlanComparator</a>()</pre>
 </li>
 </ul>
 </li>
@@ -213,7 +213,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Comparat
 <ul class="blockListLast">
 <li class="blockList">
 <h4>compare</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html#line.98">compare</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&nbsp;plan1,
+<pre>public&nbsp;int&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html#line.102">compare</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&nbsp;plan1,
                    <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&nbsp;plan2)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html b/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
index daac428..1c1e2b3 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 <hr>
 <br>
 <pre>@InterfaceAudience.Private
-public class <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.61">SimpleRegionNormalizer</a>
+public class <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.62">SimpleRegionNormalizer</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html" title="interface in org.apache.hadoop.hbase.master.normalizer">RegionNormalizer</a></pre>
 <div class="block">Simple implementation of region normalizer.
@@ -186,8 +186,8 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#masterServices">masterServices</a></span></code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
-<td class="colFirst"><code>private static int</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#MIN_REGION_COUNT">MIN_REGION_COUNT</a></span></code>&nbsp;</td>
+<td class="colFirst"><code>private int</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#minRegionCount">minRegionCount</a></span></code>&nbsp;</td>
 </tr>
 <tr class="altColor">
 <td class="colFirst"><code>private <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true" title="class or interface in java.util">Comparator</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&gt;</code></td>
@@ -291,20 +291,16 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>LOG</h4>
-<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.63">LOG</a></pre>
+<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.64">LOG</a></pre>
 </li>
 </ul>
-<a name="MIN_REGION_COUNT">
+<a name="minRegionCount">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
-<h4>MIN_REGION_COUNT</h4>
-<pre>private static final&nbsp;int <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.64">MIN_REGION_COUNT</a></pre>
-<dl>
-<dt><span class="seeLabel">See Also:</span></dt>
-<dd><a href="../../../../../../constant-values.html#org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer.MIN_REGION_COUNT">Constant Field Values</a></dd>
-</dl>
+<h4>minRegionCount</h4>
+<pre>private&nbsp;int <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.65">minRegionCount</a></pre>
 </li>
 </ul>
 <a name="masterServices">
@@ -313,7 +309,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>masterServices</h4>
-<pre>private&nbsp;<a href="../../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a> <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.65">masterServices</a></pre>
+<pre>private&nbsp;<a href="../../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a> <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.66">masterServices</a></pre>
 </li>
 </ul>
 <a name="masterRpcServices">
@@ -322,7 +318,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>masterRpcServices</h4>
-<pre>private&nbsp;<a href="../../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.html" title="class in org.apache.hadoop.hbase.master">MasterRpcServices</a> <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.66">masterRpcServices</a></pre>
+<pre>private&nbsp;<a href="../../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.html" title="class in org.apache.hadoop.hbase.master">MasterRpcServices</a> <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.67">masterRpcServices</a></pre>
 </li>
 </ul>
 <a name="skippedCount">
@@ -331,7 +327,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>skippedCount</h4>
-<pre>private static&nbsp;long[] <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.67">skippedCount</a></pre>
+<pre>private static&nbsp;long[] <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.68">skippedCount</a></pre>
 </li>
 </ul>
 <a name="planComparator">
@@ -340,7 +336,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockListLast">
 <li class="blockList">
 <h4>planComparator</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true" title="class or interface in java.util">Comparator</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&gt; <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.113">planComparator</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true" title="class or interface in java.util">Comparator</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&gt; <a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.117">planComparator</a></pre>
 </li>
 </ul>
 </li>
@@ -357,7 +353,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockListLast">
 <li class="blockList">
 <h4>SimpleRegionNormalizer</h4>
-<pre>public&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.61">SimpleRegionNormalizer</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.70">SimpleRegionNormalizer</a>()</pre>
 </li>
 </ul>
 </li>
@@ -374,7 +370,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>setMasterServices</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.74">setMasterServices</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a>&nbsp;masterServices)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.78">setMasterServices</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a>&nbsp;masterServices)</pre>
 <div class="block">Set the master service.</div>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -390,7 +386,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>setMasterRpcServices</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.79">setMasterRpcServices</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.html" title="class in org.apache.hadoop.hbase.master">MasterRpcServices</a>&nbsp;masterRpcServices)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.83">setMasterRpcServices</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.html" title="class in org.apache.hadoop.hbase.master">MasterRpcServices</a>&nbsp;masterRpcServices)</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html#setMasterRpcServices-org.apache.hadoop.hbase.master.MasterRpcServices-">RegionNormalizer</a></code></span></div>
 <div class="block">Set the master RPC service. Must be called before first call to
  <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html#computePlanForTable-org.apache.hadoop.hbase.TableName-"><code>RegionNormalizer.computePlanForTable(TableName)</code></a>.</div>
@@ -408,7 +404,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>planSkipped</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.84">planSkipped</a>(<a href="../../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.88">planSkipped</a>(<a href="../../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri,
                         <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.PlanType.html" title="enum in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan.PlanType</a>&nbsp;type)</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html#planSkipped-org.apache.hadoop.hbase.client.RegionInfo-org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType-">RegionNormalizer</a></code></span></div>
 <div class="block">Notification for the case where plan couldn't be executed due to constraint violation, such as
@@ -428,7 +424,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>getSkippedCount</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.89">getSkippedCount</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.PlanType.html" title="enum in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan.PlanType</a>&nbsp;type)</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.93">getSkippedCount</a>(<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.PlanType.html" title="enum in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan.PlanType</a>&nbsp;type)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html#getSkippedCount-org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType-">getSkippedCount</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html" title="interface in org.apache.hadoop.hbase.master.normalizer">RegionNormalizer</a></code></dd>
@@ -445,7 +441,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockList">
 <li class="blockList">
 <h4>computePlanForTable</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&gt;&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.123">computePlanForTable</a>(<a href="../../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;table)
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.html" title="interface in org.apache.hadoop.hbase.master.normalizer">NormalizationPlan</a>&gt;&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.127">computePlanForTable</a>(<a href="../../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;table)
                                             throws <a href="../../../../../../org/apache/hadoop/hbase/HBaseIOException.html" title="class in org.apache.hadoop.hbase">HBaseIOException</a></pre>
 <div class="block">Computes next most "urgent" normalization action on the table.
  Action may be either a split, or a merge, or no action.</div>
@@ -467,7 +463,7 @@ implements <a href="../../../../../../org/apache/hadoop/hbase/master/normalizer/
 <ul class="blockListLast">
 <li class="blockList">
 <h4>getRegionSize</h4>
-<pre>private&nbsp;long&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.212">getRegionSize</a>(<a href="../../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri)</pre>
+<pre>private&nbsp;long&nbsp;<a href="../../../../../../src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html#line.216">getRegionSize</a>(<a href="../../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri)</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index 7b4b20d..79c34d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -346,11 +346,11 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">SplitLogManager.TerminationStatus</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">MasterRpcServices.BalanceSwitchMode</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/RegionState.State.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">RegionState.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">MetricsMasterSourceFactoryImpl.FactoryStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/RegionState.State.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">RegionState.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">MasterRpcServices.BalanceSwitchMode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">SplitLogManager.ResubmitDirective</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">SplitLogManager.TerminationStatus</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 0fa081d..2d4a29c 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -208,9 +208,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.ServerOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">ServerProcedureInterface.ServerOperationType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">PeerProcedureInterface.PeerOperationType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.TableOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">TableProcedureInterface.TableOperationType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">PeerProcedureInterface.PeerOperationType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.ServerOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">ServerProcedureInterface.ServerOperationType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
index 9eb46aa..3e1ea7f 100644
--- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
@@ -125,8 +125,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.monitoring.<a href="../../../../../org/apache/hadoop/hbase/monitoring/MonitoredTask.State.html" title="enum in org.apache.hadoop.hbase.monitoring"><span class="typeNameLink">MonitoredTask.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.monitoring.<a href="../../../../../org/apache/hadoop/hbase/monitoring/TaskMonitor.TaskFilter.TaskType.html" title="enum in org.apache.hadoop.hbase.monitoring"><span class="typeNameLink">TaskMonitor.TaskFilter.TaskType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.monitoring.<a href="../../../../../org/apache/hadoop/hbase/monitoring/MonitoredTask.State.html" title="enum in org.apache.hadoop.hbase.monitoring"><span class="typeNameLink">MonitoredTask.State</span></a></li>
 </ul>
 </li>
 </ul>


[42/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/apache_hbase_reference_guide.pdf
----------------------------------------------------------------------
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 626805d..14ebd1a 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20180418144412+00'00')
-/CreationDate (D:20180418144412+00'00')
+/ModDate (D:20180419144425+00'00')
+/CreationDate (D:20180419144425+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 26 0 R
-/Outlines 4615 0 R
-/PageLabels 4841 0 R
+/Outlines 4616 0 R
+/PageLabels 4842 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true
@@ -24,7 +24,7 @@ endobj
 3 0 obj
 << /Type /Pages
 /Count 721
-/Kids [7 0 R 12 0 R 14 0 R 16 0 R 18 0 R 20 0 R 22 0 R 24 0 R 44 0 R 47 0 R 50 0 R 54 0 R 61 0 R 63 0 R 67 0 R 69 0 R 71 0 R 78 0 R 81 0 R 83 0 R 89 0 R 92 0 R 94 0 R 96 0 R 103 0 R 110 0 R 115 0 R 117 0 R 133 0 R 138 0 R 146 0 R 155 0 R 163 0 R 172 0 R 183 0 R 187 0 R 189 0 R 193 0 R 202 0 R 211 0 R 219 0 R 228 0 R 233 0 R 242 0 R 250 0 R 259 0 R 272 0 R 279 0 R 289 0 R 297 0 R 305 0 R 312 0 R 320 0 R 326 0 R 332 0 R 339 0 R 347 0 R 358 0 R 367 0 R 379 0 R 387 0 R 395 0 R 402 0 R 411 0 R 419 0 R 429 0 R 437 0 R 444 0 R 453 0 R 465 0 R 474 0 R 481 0 R 489 0 R 497 0 R 506 0 R 513 0 R 518 0 R 522 0 R 527 0 R 531 0 R 547 0 R 558 0 R 562 0 R 577 0 R 582 0 R 587 0 R 589 0 R 591 0 R 594 0 R 596 0 R 598 0 R 606 0 R 612 0 R 617 0 R 622 0 R 629 0 R 639 0 R 647 0 R 651 0 R 655 0 R 657 0 R 667 0 R 681 0 R 690 0 R 699 0 R 709 0 R 720 0 R 731 0 R 750 0 R 756 0 R 759 0 R 765 0 R 768 0 R 772 0 R 776 0 R 779 0 R 782 0 R 784 0 R 787 0 R 791 0 R 793 0 R 797 0 R 803 0 R 808 0 R 812 0 R 815 0 R 821 0 R
  823 0 R 827 0 R 835 0 R 837 0 R 840 0 R 843 0 R 846 0 R 849 0 R 863 0 R 871 0 R 882 0 R 893 0 R 899 0 R 909 0 R 920 0 R 923 0 R 927 0 R 930 0 R 935 0 R 944 0 R 952 0 R 956 0 R 960 0 R 965 0 R 969 0 R 971 0 R 987 0 R 998 0 R 1003 0 R 1010 0 R 1013 0 R 1021 0 R 1029 0 R 1034 0 R 1039 0 R 1044 0 R 1046 0 R 1048 0 R 1050 0 R 1060 0 R 1068 0 R 1072 0 R 1079 0 R 1086 0 R 1094 0 R 1098 0 R 1104 0 R 1109 0 R 1117 0 R 1121 0 R 1126 0 R 1128 0 R 1134 0 R 1142 0 R 1148 0 R 1155 0 R 1166 0 R 1170 0 R 1172 0 R 1174 0 R 1178 0 R 1181 0 R 1186 0 R 1189 0 R 1201 0 R 1205 0 R 1211 0 R 1219 0 R 1224 0 R 1228 0 R 1232 0 R 1234 0 R 1237 0 R 1240 0 R 1243 0 R 1247 0 R 1251 0 R 1255 0 R 1260 0 R 1264 0 R 1267 0 R 1269 0 R 1279 0 R 1282 0 R 1290 0 R 1299 0 R 1305 0 R 1309 0 R 1311 0 R 1322 0 R 1325 0 R 1331 0 R 1339 0 R 1342 0 R 1349 0 R 1357 0 R 1359 0 R 1361 0 R 1370 0 R 1372 0 R 1374 0 R 1377 0 R 1379 0 R 1381 0 R 1383 0 R 1385 0 R 1388 0 R 1392 0 R 1397 0 R 1399 0 R 1401 0 R 1403 0 R 1408 0 R 1415 0 
 R 1421 0 R 1424 0 R 1426 0 R 1429 0 R 1433 0 R 1437 0 R 1440 0 R 1442 0 R 1444 0 R 1447 0 R 1452 0 R 1458 0 R 1466 0 R 1480 0 R 1494 0 R 1497 0 R 1502 0 R 1515 0 R 1520 0 R 1535 0 R 1543 0 R 1547 0 R 1556 0 R 1571 0 R 1585 0 R 1597 0 R 1602 0 R 1608 0 R 1618 0 R 1623 0 R 1628 0 R 1636 0 R 1639 0 R 1648 0 R 1654 0 R 1658 0 R 1670 0 R 1675 0 R 1681 0 R 1683 0 R 1690 0 R 1698 0 R 1706 0 R 1710 0 R 1712 0 R 1714 0 R 1726 0 R 1732 0 R 1741 0 R 1747 0 R 1760 0 R 1766 0 R 1772 0 R 1783 0 R 1789 0 R 1794 0 R 1799 0 R 1802 0 R 1805 0 R 1810 0 R 1815 0 R 1822 0 R 1826 0 R 1831 0 R 1840 0 R 1845 0 R 1850 0 R 1852 0 R 1861 0 R 1868 0 R 1874 0 R 1879 0 R 1883 0 R 1887 0 R 1892 0 R 1897 0 R 1903 0 R 1905 0 R 1907 0 R 1910 0 R 1921 0 R 1924 0 R 1931 0 R 1939 0 R 1944 0 R 1948 0 R 1953 0 R 1955 0 R 1958 0 R 1963 0 R 1966 0 R 1968 0 R 1971 0 R 1974 0 R 1977 0 R 1987 0 R 1992 0 R 1997 0 R 1999 0 R 2007 0 R 2014 0 R 2021 0 R 2027 0 R 2032 0 R 2034 0 R 2043 0 R 2053 0 R 2063 0 R 2069 0 R 2076 0 R 2078 
 0 R 2083 0 R 2085 0 R 2087 0 R 2091 0 R 2094 0 R 2097 0 R 2102 0 R 2106 0 R 2117 0 R 2120 0 R 2125 0 R 2128 0 R 2130 0 R 2135 0 R 2145 0 R 2147 0 R 2149 0 R 2151 0 R 2153 0 R 2156 0 R 2158 0 R 2160 0 R 2163 0 R 2165 0 R 2167 0 R 2171 0 R 2176 0 R 2185 0 R 2187 0 R 2189 0 R 2195 0 R 2197 0 R 2202 0 R 2204 0 R 2206 0 R 2213 0 R 2218 0 R 2222 0 R 2227 0 R 2231 0 R 2233 0 R 2235 0 R 2239 0 R 2242 0 R 2244 0 R 2246 0 R 2250 0 R 2252 0 R 2255 0 R 2257 0 R 2259 0 R 2261 0 R 2268 0 R 2271 0 R 2276 0 R 2278 0 R 2280 0 R 2282 0 R 2284 0 R 2292 0 R 2303 0 R 2317 0 R 2328 0 R 2332 0 R 2337 0 R 2341 0 R 2344 0 R 2349 0 R 2355 0 R 2357 0 R 2360 0 R 2362 0 R 2364 0 R 2366 0 R 2371 0 R 2373 0 R 2386 0 R 2389 0 R 2397 0 R 2403 0 R 2415 0 R 2429 0 R 2442 0 R 2461 0 R 2463 0 R 2465 0 R 2469 0 R 2487 0 R 2493 0 R 2505 0 R 2509 0 R 2513 0 R 2522 0 R 2534 0 R 2539 0 R 2549 0 R 2562 0 R 2581 0 R 2590 0 R 2593 0 R 2602 0 R 2619 0 R 2626 0 R 2629 0 R 2634 0 R 2638 0 R 2641 0 R 2650 0 R 2659 0 R 2662 0 R 266
 4 0 R 2668 0 R 2682 0 R 2691 0 R 2696 0 R 2700 0 R 2703 0 R 2705 0 R 2707 0 R 2709 0 R 2714 0 R 2727 0 R 2737 0 R 2745 0 R 2751 0 R 2756 0 R 2767 0 R 2774 0 R 2780 0 R 2782 0 R 2791 0 R 2799 0 R 2801 0 R 2809 0 R 2817 0 R 2819 0 R 2828 0 R 2831 0 R 2841 0 R 2845 0 R 2854 0 R 2862 0 R 2867 0 R 2871 0 R 2875 0 R 2877 0 R 2883 0 R 2887 0 R 2891 0 R 2897 0 R 2903 0 R 2906 0 R 2912 0 R 2916 0 R 2925 0 R 2930 0 R 2935 0 R 2945 0 R 2951 0 R 2958 0 R 2961 0 R 2964 0 R 2971 0 R 2976 0 R 2979 0 R 2984 0 R 2994 0 R 2999 0 R 3001 0 R 3005 0 R 3012 0 R 3015 0 R 3026 0 R 3029 0 R 3036 0 R 3044 0 R 3048 0 R 3058 0 R 3063 0 R 3067 0 R 3075 0 R 3081 0 R 3085 0 R 3087 0 R 3098 0 R 3103 0 R 3106 0 R 3108 0 R 3110 0 R 3120 0 R 3127 0 R 3131 0 R 3134 0 R 3140 0 R 3144 0 R 3147 0 R 3151 0 R 3156 0 R 3164 0 R 3169 0 R 3174 0 R 3179 0 R 3181 0 R 3184 0 R 3186 0 R 3190 0 R 3201 0 R 3203 0 R 3207 0 R 3210 0 R 3214 0 R 3217 0 R 3221 0 R 3223 0 R 3236 0 R 3241 0 R 3246 0 R 3252 0 R 3260 0 R 3262 0 R 3270 0 R 3
 288 0 R 3299 0 R 3306 0 R 3322 0 R 3325 0 R 3330 0 R 3332 0 R 3339 0 R 3344 0 R 3347 0 R 3349 0 R 3351 0 R 3353 0 R 3356 0 R 3374 0 R 3377 0 R 3382 0 R 3388 0 R 3398 0 R 3403 0 R 3413 0 R 3423 0 R 3431 0 R 3436 0 R 3442 0 R 3447 0 R 3450 0 R 3458 0 R 3462 0 R 3467 0 R 3472 0 R 3485 0 R 3488 0 R 3494 0 R 3499 0 R 3508 0 R 3518 0 R 3524 0 R 3533 0 R 3542 0 R 3547 0 R 3553 0 R 3559 0 R 3564 0 R 3566 0 R 3572 0 R 3579 0 R 3581 0 R 3587 0 R 3589 0 R 3595 0 R 3603 0 R 3609 0 R 3618 0 R 3625 0 R 3636 0 R 3646 0 R 3657 0 R 3670 0 R 3673 0 R 3675 0 R 3680 0 R 3695 0 R 3701 0 R 3707 0 R 3711 0 R 3714 0 R 3719 0 R 3721 0 R 3725 0 R 3727 0 R 3731 0 R 3734 0 R 3737 0 R 3745 0 R 3747 0 R 3751 0 R 3754 0 R 3762 0 R 3770 0 R 3774 0 R 3777 0 R 3779 0 R 3783 0 R 3788 0 R 3793 0 R 3796 0 R 3805 0 R 3810 0 R 3814 0 R 3817 0 R 3825 0 R 3830 0 R 3838 0 R 3843 0 R 3845 0 R 3851 0 R 3853 0 R 3858 0 R 3862 0 R 3868 0 R 3872 0 R 3884 0 R 3900 0 R 3915 0 R 3920 0 R 3923 0 R 3926 0 R 3932 0 R 3937 0 R 3939 0 R
  3941 0 R 3943 0 R 3945 0 R 3947 0 R 3956 0 R 3960 0 R 3964 0 R 3968 0 R 3970 0 R 3977 0 R 3987 0 R 3994 0 R 3997 0 R 4000 0 R 4002 0 R 4009 0 R 4016 0 R 4026 0 R 4030 0 R 4033 0 R 4037 0 R 4041 0 R 4047 0 R 4050 0 R 4066 0 R 4071 0 R 4094 0 R 4098 0 R 4105 0 R 4116 0 R 4125 0 R 4128 0 R 4131 0 R 4134 0 R 4150 0 R 4156 0 R 4163 0 R]
+/Kids [7 0 R 12 0 R 14 0 R 16 0 R 18 0 R 20 0 R 22 0 R 24 0 R 44 0 R 47 0 R 50 0 R 54 0 R 61 0 R 63 0 R 67 0 R 69 0 R 71 0 R 78 0 R 81 0 R 83 0 R 89 0 R 92 0 R 94 0 R 96 0 R 103 0 R 110 0 R 115 0 R 117 0 R 133 0 R 138 0 R 146 0 R 155 0 R 163 0 R 172 0 R 183 0 R 187 0 R 189 0 R 193 0 R 202 0 R 211 0 R 219 0 R 228 0 R 233 0 R 242 0 R 250 0 R 259 0 R 272 0 R 279 0 R 289 0 R 297 0 R 305 0 R 312 0 R 320 0 R 327 0 R 333 0 R 340 0 R 348 0 R 357 0 R 366 0 R 380 0 R 387 0 R 395 0 R 402 0 R 410 0 R 419 0 R 429 0 R 437 0 R 444 0 R 453 0 R 465 0 R 475 0 R 482 0 R 489 0 R 497 0 R 506 0 R 514 0 R 519 0 R 523 0 R 528 0 R 532 0 R 548 0 R 559 0 R 563 0 R 578 0 R 583 0 R 588 0 R 590 0 R 592 0 R 595 0 R 597 0 R 599 0 R 607 0 R 613 0 R 618 0 R 623 0 R 630 0 R 640 0 R 648 0 R 652 0 R 656 0 R 658 0 R 668 0 R 682 0 R 691 0 R 700 0 R 710 0 R 721 0 R 732 0 R 751 0 R 757 0 R 760 0 R 766 0 R 769 0 R 773 0 R 777 0 R 780 0 R 783 0 R 785 0 R 788 0 R 792 0 R 794 0 R 798 0 R 804 0 R 809 0 R 813 0 R 816 0 R 822 0 R
  824 0 R 828 0 R 836 0 R 838 0 R 841 0 R 844 0 R 847 0 R 850 0 R 864 0 R 872 0 R 883 0 R 894 0 R 900 0 R 910 0 R 921 0 R 924 0 R 928 0 R 931 0 R 936 0 R 945 0 R 953 0 R 957 0 R 961 0 R 966 0 R 970 0 R 972 0 R 988 0 R 999 0 R 1004 0 R 1011 0 R 1014 0 R 1022 0 R 1030 0 R 1035 0 R 1040 0 R 1045 0 R 1047 0 R 1049 0 R 1051 0 R 1061 0 R 1069 0 R 1073 0 R 1080 0 R 1087 0 R 1095 0 R 1099 0 R 1105 0 R 1110 0 R 1118 0 R 1122 0 R 1127 0 R 1129 0 R 1135 0 R 1143 0 R 1149 0 R 1156 0 R 1167 0 R 1171 0 R 1173 0 R 1175 0 R 1179 0 R 1182 0 R 1187 0 R 1190 0 R 1202 0 R 1206 0 R 1212 0 R 1220 0 R 1225 0 R 1229 0 R 1233 0 R 1235 0 R 1238 0 R 1241 0 R 1244 0 R 1248 0 R 1252 0 R 1256 0 R 1261 0 R 1265 0 R 1268 0 R 1270 0 R 1280 0 R 1283 0 R 1291 0 R 1300 0 R 1306 0 R 1310 0 R 1312 0 R 1323 0 R 1326 0 R 1332 0 R 1340 0 R 1343 0 R 1350 0 R 1358 0 R 1360 0 R 1362 0 R 1371 0 R 1373 0 R 1375 0 R 1378 0 R 1380 0 R 1382 0 R 1384 0 R 1386 0 R 1389 0 R 1393 0 R 1398 0 R 1400 0 R 1402 0 R 1404 0 R 1409 0 R 1416 0 
 R 1422 0 R 1425 0 R 1427 0 R 1430 0 R 1434 0 R 1438 0 R 1441 0 R 1443 0 R 1445 0 R 1448 0 R 1453 0 R 1459 0 R 1467 0 R 1481 0 R 1495 0 R 1498 0 R 1503 0 R 1516 0 R 1521 0 R 1536 0 R 1544 0 R 1548 0 R 1557 0 R 1572 0 R 1586 0 R 1598 0 R 1603 0 R 1609 0 R 1619 0 R 1624 0 R 1629 0 R 1637 0 R 1640 0 R 1649 0 R 1655 0 R 1659 0 R 1671 0 R 1676 0 R 1682 0 R 1684 0 R 1691 0 R 1699 0 R 1707 0 R 1711 0 R 1713 0 R 1715 0 R 1727 0 R 1733 0 R 1742 0 R 1748 0 R 1761 0 R 1767 0 R 1773 0 R 1784 0 R 1790 0 R 1795 0 R 1800 0 R 1803 0 R 1806 0 R 1811 0 R 1816 0 R 1823 0 R 1827 0 R 1832 0 R 1841 0 R 1846 0 R 1851 0 R 1853 0 R 1862 0 R 1869 0 R 1875 0 R 1880 0 R 1884 0 R 1888 0 R 1893 0 R 1898 0 R 1904 0 R 1906 0 R 1908 0 R 1911 0 R 1922 0 R 1925 0 R 1932 0 R 1940 0 R 1945 0 R 1949 0 R 1954 0 R 1956 0 R 1959 0 R 1964 0 R 1967 0 R 1969 0 R 1972 0 R 1975 0 R 1978 0 R 1988 0 R 1993 0 R 1998 0 R 2000 0 R 2008 0 R 2015 0 R 2022 0 R 2028 0 R 2033 0 R 2035 0 R 2044 0 R 2054 0 R 2064 0 R 2070 0 R 2077 0 R 2079 
 0 R 2084 0 R 2086 0 R 2088 0 R 2092 0 R 2095 0 R 2098 0 R 2103 0 R 2107 0 R 2118 0 R 2121 0 R 2126 0 R 2129 0 R 2131 0 R 2136 0 R 2146 0 R 2148 0 R 2150 0 R 2152 0 R 2154 0 R 2157 0 R 2159 0 R 2161 0 R 2164 0 R 2166 0 R 2168 0 R 2172 0 R 2177 0 R 2186 0 R 2188 0 R 2190 0 R 2196 0 R 2198 0 R 2203 0 R 2205 0 R 2207 0 R 2214 0 R 2219 0 R 2223 0 R 2228 0 R 2232 0 R 2234 0 R 2236 0 R 2240 0 R 2243 0 R 2245 0 R 2247 0 R 2251 0 R 2253 0 R 2256 0 R 2258 0 R 2260 0 R 2262 0 R 2269 0 R 2272 0 R 2277 0 R 2279 0 R 2281 0 R 2283 0 R 2285 0 R 2293 0 R 2304 0 R 2318 0 R 2329 0 R 2333 0 R 2338 0 R 2342 0 R 2345 0 R 2350 0 R 2356 0 R 2358 0 R 2361 0 R 2363 0 R 2365 0 R 2367 0 R 2372 0 R 2374 0 R 2387 0 R 2390 0 R 2398 0 R 2404 0 R 2416 0 R 2430 0 R 2443 0 R 2462 0 R 2464 0 R 2466 0 R 2470 0 R 2488 0 R 2494 0 R 2506 0 R 2510 0 R 2514 0 R 2523 0 R 2535 0 R 2540 0 R 2550 0 R 2563 0 R 2582 0 R 2591 0 R 2594 0 R 2603 0 R 2620 0 R 2627 0 R 2630 0 R 2635 0 R 2639 0 R 2642 0 R 2651 0 R 2660 0 R 2663 0 R 266
 5 0 R 2669 0 R 2683 0 R 2692 0 R 2697 0 R 2701 0 R 2704 0 R 2706 0 R 2708 0 R 2710 0 R 2715 0 R 2728 0 R 2738 0 R 2746 0 R 2752 0 R 2757 0 R 2768 0 R 2775 0 R 2781 0 R 2783 0 R 2792 0 R 2800 0 R 2802 0 R 2810 0 R 2818 0 R 2820 0 R 2829 0 R 2832 0 R 2842 0 R 2846 0 R 2855 0 R 2863 0 R 2868 0 R 2872 0 R 2876 0 R 2878 0 R 2884 0 R 2888 0 R 2892 0 R 2898 0 R 2904 0 R 2907 0 R 2913 0 R 2917 0 R 2926 0 R 2931 0 R 2936 0 R 2946 0 R 2952 0 R 2959 0 R 2962 0 R 2965 0 R 2972 0 R 2977 0 R 2980 0 R 2985 0 R 2995 0 R 3000 0 R 3002 0 R 3006 0 R 3013 0 R 3016 0 R 3027 0 R 3030 0 R 3037 0 R 3045 0 R 3049 0 R 3059 0 R 3064 0 R 3068 0 R 3076 0 R 3082 0 R 3086 0 R 3088 0 R 3099 0 R 3104 0 R 3107 0 R 3109 0 R 3111 0 R 3121 0 R 3128 0 R 3132 0 R 3135 0 R 3141 0 R 3145 0 R 3148 0 R 3152 0 R 3157 0 R 3165 0 R 3170 0 R 3175 0 R 3180 0 R 3182 0 R 3185 0 R 3187 0 R 3191 0 R 3202 0 R 3204 0 R 3208 0 R 3211 0 R 3215 0 R 3218 0 R 3222 0 R 3224 0 R 3237 0 R 3242 0 R 3247 0 R 3253 0 R 3261 0 R 3263 0 R 3271 0 R 3
 289 0 R 3300 0 R 3307 0 R 3323 0 R 3326 0 R 3331 0 R 3333 0 R 3340 0 R 3345 0 R 3348 0 R 3350 0 R 3352 0 R 3354 0 R 3357 0 R 3375 0 R 3378 0 R 3383 0 R 3389 0 R 3399 0 R 3404 0 R 3414 0 R 3424 0 R 3432 0 R 3437 0 R 3443 0 R 3448 0 R 3451 0 R 3459 0 R 3463 0 R 3468 0 R 3473 0 R 3486 0 R 3489 0 R 3495 0 R 3500 0 R 3509 0 R 3518 0 R 3524 0 R 3533 0 R 3542 0 R 3547 0 R 3553 0 R 3559 0 R 3563 0 R 3565 0 R 3571 0 R 3578 0 R 3580 0 R 3586 0 R 3588 0 R 3594 0 R 3602 0 R 3608 0 R 3617 0 R 3624 0 R 3635 0 R 3645 0 R 3656 0 R 3669 0 R 3672 0 R 3674 0 R 3679 0 R 3694 0 R 3700 0 R 3706 0 R 3710 0 R 3713 0 R 3718 0 R 3720 0 R 3724 0 R 3726 0 R 3730 0 R 3733 0 R 3736 0 R 3744 0 R 3746 0 R 3750 0 R 3753 0 R 3761 0 R 3769 0 R 3773 0 R 3776 0 R 3778 0 R 3782 0 R 3787 0 R 3792 0 R 3795 0 R 3804 0 R 3809 0 R 3813 0 R 3816 0 R 3824 0 R 3829 0 R 3837 0 R 3842 0 R 3844 0 R 3850 0 R 3852 0 R 3857 0 R 3861 0 R 3867 0 R 3871 0 R 3883 0 R 3899 0 R 3914 0 R 3919 0 R 3922 0 R 3925 0 R 3931 0 R 3936 0 R 3938 0 R
  3940 0 R 3942 0 R 3944 0 R 3946 0 R 3955 0 R 3959 0 R 3963 0 R 3967 0 R 3969 0 R 3976 0 R 3986 0 R 3995 0 R 3998 0 R 4001 0 R 4003 0 R 4010 0 R 4017 0 R 4027 0 R 4031 0 R 4034 0 R 4038 0 R 4042 0 R 4048 0 R 4051 0 R 4067 0 R 4072 0 R 4095 0 R 4099 0 R 4106 0 R 4117 0 R 4126 0 R 4129 0 R 4132 0 R 4135 0 R 4151 0 R 4157 0 R 4164 0 R]
 >>
 endobj
 4 0 obj
@@ -187,11 +187,11 @@ endobj
 << /Type /Font
 /BaseFont /71be00+NotoSerif
 /Subtype /TrueType
-/FontDescriptor 4843 0 R
+/FontDescriptor 4844 0 R
 /FirstChar 32
 /LastChar 255
-/Widths 4845 0 R
-/ToUnicode 4844 0 R
+/Widths 4846 0 R
+/ToUnicode 4845 0 R
 >>
 endobj
 11 0 obj
@@ -1750,7 +1750,7 @@ endobj
 /F1.0 10 0 R
 >>
 >>
-/Annots [4165 0 R 4166 0 R 4167 0 R 4168 0 R 4169 0 R 4170 0 R 4171 0 R 4172 0 R 4173 0 R 4174 0 R 4175 0 R 4176 0 R 4177 0 R 4178 0 R 4179 0 R 4180 0 R 4181 0 R 4182 0 R 4183 0 R 4184 0 R 4185 0 R 4186 0 R 4187 0 R 4188 0 R 4189 0 R 4190 0 R 4191 0 R 4192 0 R 4193 0 R 4194 0 R 4195 0 R 4196 0 R 4197 0 R 4198 0 R 4199 0 R 4200 0 R 4201 0 R 4202 0 R 4203 0 R 4204 0 R 4205 0 R 4206 0 R 4207 0 R 4208 0 R 4209 0 R 4210 0 R 4211 0 R 4212 0 R 4213 0 R 4214 0 R 4215 0 R 4216 0 R 4217 0 R 4218 0 R 4219 0 R 4220 0 R 4221 0 R 4222 0 R 4223 0 R 4224 0 R 4225 0 R 4226 0 R 4227 0 R 4228 0 R 4229 0 R 4230 0 R 4231 0 R 4232 0 R 4233 0 R 4234 0 R 4235 0 R 4236 0 R 4237 0 R 4238 0 R 4239 0 R 4240 0 R]
+/Annots [4166 0 R 4167 0 R 4168 0 R 4169 0 R 4170 0 R 4171 0 R 4172 0 R 4173 0 R 4174 0 R 4175 0 R 4176 0 R 4177 0 R 4178 0 R 4179 0 R 4180 0 R 4181 0 R 4182 0 R 4183 0 R 4184 0 R 4185 0 R 4186 0 R 4187 0 R 4188 0 R 4189 0 R 4190 0 R 4191 0 R 4192 0 R 4193 0 R 4194 0 R 4195 0 R 4196 0 R 4197 0 R 4198 0 R 4199 0 R 4200 0 R 4201 0 R 4202 0 R 4203 0 R 4204 0 R 4205 0 R 4206 0 R 4207 0 R 4208 0 R 4209 0 R 4210 0 R 4211 0 R 4212 0 R 4213 0 R 4214 0 R 4215 0 R 4216 0 R 4217 0 R 4218 0 R 4219 0 R 4220 0 R 4221 0 R 4222 0 R 4223 0 R 4224 0 R 4225 0 R 4226 0 R 4227 0 R 4228 0 R 4229 0 R 4230 0 R 4231 0 R 4232 0 R 4233 0 R 4234 0 R 4235 0 R 4236 0 R 4237 0 R 4238 0 R 4239 0 R 4240 0 R 4241 0 R]
 >>
 endobj
 13 0 obj
@@ -3417,7 +3417,7 @@ endobj
 /Font << /F1.0 10 0 R
 >>
 >>
-/Annots [4241 0 R 4242 0 R 4243 0 R 4244 0 R 4245 0 R 4246 0 R 4247 0 R 4248 0 R 4249 0 R 4250 0 R 4251 0 R 4252 0 R 4253 0 R 4254 0 R 4255 0 R 4256 0 R 4257 0 R 4258 0 R 4259 0 R 4260 0 R 4261 0 R 4262 0 R 4263 0 R 4264 0 R 4265 0 R 4266 0 R 4267 0 R 4268 0 R 4269 0 R 4270 0 R 4271 0 R 4272 0 R 4273 0 R 4274 0 R 4275 0 R 4276 0 R 4277 0 R 4278 0 R 4279 0 R 4280 0 R 4281 0 R 4282 0 R 4283 0 R 4284 0 R 4285 0 R 4286 0 R 4287 0 R 4288 0 R 4289 0 R 4290 0 R 4291 0 R 4292 0 R 4293 0 R 4294 0 R 4295 0 R 4296 0 R 4297 0 R 4298 0 R 4299 0 R 4300 0 R 4301 0 R 4302 0 R 4303 0 R 4304 0 R 4305 0 R 4306 0 R 4307 0 R 4308 0 R 4309 0 R 4310 0 R 4311 0 R 4312 0 R 4313 0 R 4314 0 R 4315 0 R 4316 0 R 4317 0 R 4318 0 R 4319 0 R 4320 0 R 4321 0 R 4322 0 R]
+/Annots [4242 0 R 4243 0 R 4244 0 R 4245 0 R 4246 0 R 4247 0 R 4248 0 R 4249 0 R 4250 0 R 4251 0 R 4252 0 R 4253 0 R 4254 0 R 4255 0 R 4256 0 R 4257 0 R 4258 0 R 4259 0 R 4260 0 R 4261 0 R 4262 0 R 4263 0 R 4264 0 R 4265 0 R 4266 0 R 4267 0 R 4268 0 R 4269 0 R 4270 0 R 4271 0 R 4272 0 R 4273 0 R 4274 0 R 4275 0 R 4276 0 R 4277 0 R 4278 0 R 4279 0 R 4280 0 R 4281 0 R 4282 0 R 4283 0 R 4284 0 R 4285 0 R 4286 0 R 4287 0 R 4288 0 R 4289 0 R 4290 0 R 4291 0 R 4292 0 R 4293 0 R 4294 0 R 4295 0 R 4296 0 R 4297 0 R 4298 0 R 4299 0 R 4300 0 R 4301 0 R 4302 0 R 4303 0 R 4304 0 R 4305 0 R 4306 0 R 4307 0 R 4308 0 R 4309 0 R 4310 0 R 4311 0 R 4312 0 R 4313 0 R 4314 0 R 4315 0 R 4316 0 R 4317 0 R 4318 0 R 4319 0 R 4320 0 R 4321 0 R 4322 0 R 4323 0 R]
 >>
 endobj
 15 0 obj
@@ -5084,7 +5084,7 @@ endobj
 /Font << /F1.0 10 0 R
 >>
 >>
-/Annots [4323 0 R 4324 0 R 4325 0 R 4326 0 R 4327 0 R 4328 0 R 4329 0 R 4330 0 R 4331 0 R 4332 0 R 4333 0 R 4334 0 R 4335 0 R 4336 0 R 4337 0 R 4338 0 R 4339 0 R 4340 0 R 4341 0 R 4342 0 R 4343 0 R 4344 0 R 4345 0 R 4346 0 R 4347 0 R 4348 0 R 4349 0 R 4350 0 R 4351 0 R 4352 0 R 4353 0 R 4354 0 R 4355 0 R 4356 0 R 4357 0 R 4358 0 R 4359 0 R 4360 0 R 4361 0 R 4362 0 R 4363 0 R 4364 0 R 4365 0 R 4366 0 R 4367 0 R 4368 0 R 4369 0 R 4370 0 R 4371 0 R 4372 0 R 4373 0 R 4374 0 R 4375 0 R 4376 0 R 4377 0 R 4378 0 R 4379 0 R 4380 0 R 4381 0 R 4382 0 R 4383 0 R 4384 0 R 4385 0 R 4386 0 R 4387 0 R 4388 0 R 4389 0 R 4390 0 R 4391 0 R 4392 0 R 4393 0 R 4394 0 R 4395 0 R 4396 0 R 4397 0 R 4398 0 R 4399 0 R 4400 0 R 4401 0 R 4402 0 R 4403 0 R 4404 0 R]
+/Annots [4324 0 R 4325 0 R 4326 0 R 4327 0 R 4328 0 R 4329 0 R 4330 0 R 4331 0 R 4332 0 R 4333 0 R 4334 0 R 4335 0 R 4336 0 R 4337 0 R 4338 0 R 4339 0 R 4340 0 R 4341 0 R 4342 0 R 4343 0 R 4344 0 R 4345 0 R 4346 0 R 4347 0 R 4348 0 R 4349 0 R 4350 0 R 4351 0 R 4352 0 R 4353 0 R 4354 0 R 4355 0 R 4356 0 R 4357 0 R 4358 0 R 4359 0 R 4360 0 R 4361 0 R 4362 0 R 4363 0 R 4364 0 R 4365 0 R 4366 0 R 4367 0 R 4368 0 R 4369 0 R 4370 0 R 4371 0 R 4372 0 R 4373 0 R 4374 0 R 4375 0 R 4376 0 R 4377 0 R 4378 0 R 4379 0 R 4380 0 R 4381 0 R 4382 0 R 4383 0 R 4384 0 R 4385 0 R 4386 0 R 4387 0 R 4388 0 R 4389 0 R 4390 0 R 4391 0 R 4392 0 R 4393 0 R 4394 0 R 4395 0 R 4396 0 R 4397 0 R 4398 0 R 4399 0 R 4400 0 R 4401 0 R 4402 0 R 4403 0 R 4404 0 R 4405 0 R]
 >>
 endobj
 17 0 obj
@@ -6751,7 +6751,7 @@ endobj
 /Font << /F1.0 10 0 R
 >>
 >>
-/Annots [4405 0 R 4406 0 R 4407 0 R 4408 0 R 4409 0 R 4410 0 R 4411 0 R 4412 0 R 4413 0 R 4414 0 R 4415 0 R 4416 0 R 4417 0 R 4418 0 R 4419 0 R 4420 0 R 4421 0 R 4422 0 R 4423 0 R 4424 0 R 4425 0 R 4426 0 R 4427 0 R 4428 0 R 4429 0 R 4430 0 R 4431 0 R 4432 0 R 4433 0 R 4434 0 R 4435 0 R 4436 0 R 4437 0 R 4438 0 R 4439 0 R 4440 0 R 4441 0 R 4442 0 R 4443 0 R 4444 0 R 4445 0 R 4446 0 R 4447 0 R 4448 0 R 4449 0 R 4450 0 R 4451 0 R 4452 0 R 4453 0 R 4454 0 R 4455 0 R 4456 0 R 4457 0 R 4458 0 R 4459 0 R 4460 0 R 4461 0 R 4462 0 R 4463 0 R 4464 0 R 4465 0 R 4466 0 R 4467 0 R 4468 0 R 4469 0 R 4470 0 R 4471 0 R 4472 0 R 4473 0 R 4474 0 R 4475 0 R 4476 0 R 4477 0 R 4478 0 R 4479 0 R 4480 0 R 4481 0 R 4482 0 R 4483 0 R 4484 0 R 4485 0 R 4486 0 R]
+/Annots [4406 0 R 4407 0 R 4408 0 R 4409 0 R 4410 0 R 4411 0 R 4412 0 R 4413 0 R 4414 0 R 4415 0 R 4416 0 R 4417 0 R 4418 0 R 4419 0 R 4420 0 R 4421 0 R 4422 0 R 4423 0 R 4424 0 R 4425 0 R 4426 0 R 4427 0 R 4428 0 R 4429 0 R 4430 0 R 4431 0 R 4432 0 R 4433 0 R 4434 0 R 4435 0 R 4436 0 R 4437 0 R 4438 0 R 4439 0 R 4440 0 R 4441 0 R 4442 0 R 4443 0 R 4444 0 R 4445 0 R 4446 0 R 4447 0 R 4448 0 R 4449 0 R 4450 0 R 4451 0 R 4452 0 R 4453 0 R 4454 0 R 4455 0 R 4456 0 R 4457 0 R 4458 0 R 4459 0 R 4460 0 R 4461 0 R 4462 0 R 4463 0 R 4464 0 R 4465 0 R 4466 0 R 4467 0 R 4468 0 R 4469 0 R 4470 0 R 4471 0 R 4472 0 R 4473 0 R 4474 0 R 4475 0 R 4476 0 R 4477 0 R 4478 0 R 4479 0 R 4480 0 R 4481 0 R 4482 0 R 4483 0 R 4484 0 R 4485 0 R 4486 0 R 4487 0 R]
 >>
 endobj
 19 0 obj
@@ -8429,7 +8429,7 @@ endobj
 /Font << /F1.0 10 0 R
 >>
 >>
-/Annots [4487 0 R 4488 0 R 4489 0 R 4490 0 R 4491 0 R 4492 0 R 4493 0 R 4494 0 R 4495 0 R 4496 0 R 4497 0 R 4498 0 R 4499 0 R 4500 0 R 4501 0 R 4502 0 R 4503 0 R 4504 0 R 4505 0 R 4506 0 R 4507 0 R 4508 0 R 4509 0 R 4510 0 R 4511 0 R 4512 0 R 4513 0 R 4514 0 R 4515 0 R 4516 0 R 4517 0 R 4518 0 R 4519 0 R 4520 0 R 4521 0 R 4522 0 R 4523 0 R 4524 0 R 4525 0 R 4526 0 R 4527 0 R 4528 0 R 4529 0 R 4530 0 R 4531 0 R 4532 0 R 4533 0 R 4534 0 R 4535 0 R 4536 0 R 4537 0 R 4538 0 R 4539 0 R 4540 0 R 4541 0 R 4542 0 R 4543 0 R 4544 0 R 4545 0 R 4546 0 R 4547 0 R 4548 0 R 4549 0 R 4550 0 R 4551 0 R 4552 0 R 4553 0 R 4554 0 R 4555 0 R 4556 0 R 4557 0 R 4558 0 R 4559 0 R 4560 0 R 4561 0 R 4562 0 R 4563 0 R 4564 0 R 4565 0 R 4566 0 R 4567 0 R 4568 0 R 4569 0 R 4570 0 R]
+/Annots [4488 0 R 4489 0 R 4490 0 R 4491 0 R 4492 0 R 4493 0 R 4494 0 R 4495 0 R 4496 0 R 4497 0 R 4498 0 R 4499 0 R 4500 0 R 4501 0 R 4502 0 R 4503 0 R 4504 0 R 4505 0 R 4506 0 R 4507 0 R 4508 0 R 4509 0 R 4510 0 R 4511 0 R 4512 0 R 4513 0 R 4514 0 R 4515 0 R 4516 0 R 4517 0 R 4518 0 R 4519 0 R 4520 0 R 4521 0 R 4522 0 R 4523 0 R 4524 0 R 4525 0 R 4526 0 R 4527 0 R 4528 0 R 4529 0 R 4530 0 R 4531 0 R 4532 0 R 4533 0 R 4534 0 R 4535 0 R 4536 0 R 4537 0 R 4538 0 R 4539 0 R 4540 0 R 4541 0 R 4542 0 R 4543 0 R 4544 0 R 4545 0 R 4546 0 R 4547 0 R 4548 0 R 4549 0 R 4550 0 R 4551 0 R 4552 0 R 4553 0 R 4554 0 R 4555 0 R 4556 0 R 4557 0 R 4558 0 R 4559 0 R 4560 0 R 4561 0 R 4562 0 R 4563 0 R 4564 0 R 4565 0 R 4566 0 R 4567 0 R 4568 0 R 4569 0 R 4570 0 R 4571 0 R]
 >>
 endobj
 21 0 obj
@@ -9296,7 +9296,7 @@ endobj
 /Font << /F1.0 10 0 R
 >>
 >>
-/Annots [4571 0 R 4572 0 R 4573 0 R 4574 0 R 4575 0 R 4576 0 R 4577 0 R 4578 0 R 4579 0 R 4580 0 R 4581 0 R 4582 0 R 4583 0 R 4584 0 R 4585 0 R 4586 0 R 4587 0 R 4588 0 R 4589 0 R 4590 0 R 4591 0 R 4592 0 R 4593 0 R 4594 0 R 4595 0 R 4596 0 R 4597 0 R 4598 0 R 4599 0 R 4600 0 R 4601 0 R 4602 0 R 4603 0 R 4604 0 R 4605 0 R 4606 0 R 4607 0 R 4608 0 R 4609 0 R 4610 0 R 4611 0 R 4612 0 R]
+/Annots [4572 0 R 4573 0 R 4574 0 R 4575 0 R 4576 0 R 4577 0 R 4578 0 R 4579 0 R 4580 0 R 4581 0 R 4582 0 R 4583 0 R 4584 0 R 4585 0 R 4586 0 R 4587 0 R 4588 0 R 4589 0 R 4590 0 R 4591 0 R 4592 0 R 4593 0 R 4594 0 R 4595 0 R 4596 0 R 4597 0 R 4598 0 R 4599 0 R 4600 0 R 4601 0 R 4602 0 R 4603 0 R 4604 0 R 4605 0 R 4606 0 R 4607 0 R 4608 0 R 4609 0 R 4610 0 R 4611 0 R 4612 0 R 4613 0 R]
 >>
 endobj
 23 0 obj
@@ -10108,7 +10108,7 @@ endobj
 /F4.0 35 0 R
 /F1.1 38 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [30 0 R 31 0 R 32 0 R 34 0 R 36 0 R 37 0 R 39 0 R 40 0 R 41 0 R]
@@ -10123,7 +10123,7 @@ endobj
 >>
 endobj
 27 0 obj
-<< /Kids [642 0 R 3634 0 R 1914 0 R 643 0 R 3562 0 R 1158 0 R 2529 0 R 3765 0 R]
+<< /Kids [643 0 R 3633 0 R 1915 0 R 644 0 R 3992 0 R 1159 0 R 2530 0 R 3764 0 R]
 >>
 endobj
 28 0 obj
@@ -10133,11 +10133,11 @@ endobj
 << /Type /Font
 /BaseFont /358635+NotoSerif-Bold
 /Subtype /TrueType
-/FontDescriptor 4847 0 R
+/FontDescriptor 4848 0 R
 /FirstChar 32
 /LastChar 255
-/Widths 4849 0 R
-/ToUnicode 4848 0 R
+/Widths 4850 0 R
+/ToUnicode 4849 0 R
 >>
 endobj
 30 0 obj
@@ -10177,11 +10177,11 @@ endobj
 << /Type /Font
 /BaseFont /260f03+NotoSerif-Italic
 /Subtype /TrueType
-/FontDescriptor 4851 0 R
+/FontDescriptor 4852 0 R
 /FirstChar 32
 /LastChar 255
-/Widths 4853 0 R
-/ToUnicode 4852 0 R
+/Widths 4854 0 R
+/ToUnicode 4853 0 R
 >>
 endobj
 34 0 obj
@@ -10199,11 +10199,11 @@ endobj
 << /Type /Font
 /BaseFont /c7d210+mplus1mn-regular
 /Subtype /TrueType
-/FontDescriptor 4855 0 R
+/FontDescriptor 4856 0 R
 /FirstChar 32
 /LastChar 255
-/Widths 4857 0 R
-/ToUnicode 4856 0 R
+/Widths 4858 0 R
+/ToUnicode 4857 0 R
 >>
 endobj
 36 0 obj
@@ -10229,11 +10229,11 @@ endobj
 << /Type /Font
 /BaseFont /34c70d+NotoSerif
 /Subtype /TrueType
-/FontDescriptor 4859 0 R
+/FontDescriptor 4860 0 R
 /FirstChar 32
 /LastChar 255
-/Widths 4861 0 R
-/ToUnicode 4860 0 R
+/Widths 4862 0 R
+/ToUnicode 4861 0 R
 >>
 endobj
 39 0 obj
@@ -10667,7 +10667,7 @@ endobj
 /F5.1 45 0 R
 /F2.0 29 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -10676,11 +10676,11 @@ endobj
 << /Type /Font
 /BaseFont /26ec65+FontAwesome
 /Subtype /TrueType
-/FontDescriptor 4863 0 R
+/FontDescriptor 4864 0 R
 /FirstChar 32
 /LastChar 255
-/Widths 4865 0 R
-/ToUnicode 4864 0 R
+/Widths 4866 0 R
+/ToUnicode 4865 0 R
 >>
 endobj
 46 0 obj
@@ -10738,7 +10738,7 @@ endobj
 /Font << /F2.0 29 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -10823,7 +10823,7 @@ endobj
 /Font << /F2.0 29 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [52 0 R]
@@ -12396,7 +12396,7 @@ endobj
 /F1.1 38 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [57 0 R 59 0 R]
@@ -13516,7 +13516,7 @@ endobj
 /F1.0 10 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -14395,7 +14395,7 @@ endobj
 /F3.0 33 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [64 0 R]
@@ -15185,7 +15185,7 @@ endobj
 /Font << /F4.0 35 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -15904,7 +15904,7 @@ endobj
 /Font << /F4.0 35 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -16780,7 +16780,7 @@ endobj
 /F2.0 29 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [73 0 R 74 0 R 75 0 R 76 0 R]
@@ -17770,7 +17770,7 @@ endobj
 /F1.0 10 0 R
 /F3.0 33 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [79 0 R]
@@ -18693,7 +18693,7 @@ endobj
 /F4.0 35 0 R
 /F3.0 33 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -20204,7 +20204,7 @@ endobj
 /F4.0 35 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [84 0 R 86 0 R]
@@ -21552,7 +21552,7 @@ endobj
 /F4.0 35 0 R
 /F3.0 33 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [90 0 R]
@@ -22626,7 +22626,7 @@ endobj
 /F1.0 10 0 R
 /F3.0 33 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -23349,7 +23349,7 @@ endobj
 /F1.0 10 0 R
 /F3.0 33 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -23988,7 +23988,7 @@ endobj
 /F4.0 35 0 R
 /F2.0 29 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [97 0 R 98 0 R 99 0 R 101 0 R]
@@ -24232,7 +24232,7 @@ endobj
 /Font << /F2.0 29 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [105 0 R 106 0 R 107 0 R 108 0 R]
@@ -25104,7 +25104,7 @@ endobj
 /F4.0 35 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [112 0 R 113 0 R]
@@ -25384,7 +25384,7 @@ endobj
 /F1.0 10 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -26864,7 +26864,7 @@ endobj
 /F5.1 45 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [120 0 R 121 0 R 122 0 R 123 0 R 124 0 R 125 0 R 126 0 R 128 0 R 129 0 R 130 0 R 131 0 R]
@@ -27904,7 +27904,7 @@ endobj
 /F2.0 29 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [134 0 R 136 0 R]
@@ -28657,7 +28657,7 @@ endobj
 /F2.0 29 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [139 0 R 141 0 R 142 0 R 143 0 R 144 0 R]
@@ -31216,7 +31216,7 @@ endobj
 /F2.0 29 0 R
 /F5.1 45 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [147 0 R 148 0 R 149 0 R 150 0 R 151 0 R 152 0 R 153 0 R]
@@ -32017,7 +32017,7 @@ endobj
 /F2.0 29 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [159 0 R 160 0 R]
@@ -32028,12 +32028,12 @@ endobj
 endobj
 157 0 obj
 << /Limits [(__anchor-top) (adding.new.node)]
-/Names [(__anchor-top) 25 0 R (__indexterm-7551904) 3527 0 R (__indexterm-7554308) 3529 0 R (__indexterm-7555856) 3531 0 R (__indexterm-7558376) 3534 0 R (acid) 931 0 R (acl) 3337 0 R (add-metric-name-and-function-to-hadoop-compat-interface) 3628 0 R (add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3630 0 R (add.metrics) 3626 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3869 0 R (adding.new.node) 3083 0 R]
+/Names [(__anchor-top) 25 0 R (__indexterm-7552678) 3527 0 R (__indexterm-7555082) 3529 0 R (__indexterm-7556630) 3531 0 R (__indexterm-7559150) 3534 0 R (acid) 932 0 R (acl) 3338 0 R (add-metric-name-and-function-to-hadoop-compat-interface) 3627 0 R (add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3629 0 R (add.metrics) 3625 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3868 0 R (adding.new.node) 3084 0 R]
 >>
 endobj
 158 0 obj
 << /Limits [(io.storefile.bloom.block.size) (jdk-version-requirements)]
-/Names [(io.storefile.bloom.block.size) 355 0 R (irbrc) 794 0 R (irc) 3368 0 R (isolate-system-tables) 3334 0 R (java) 119 0 R (java-2) 1917 0 R (java-3) 1922 0 R (java.client.config) 515 0 R (jdk-issues) 2898 0 R (jdk-version-requirements) 56 0 R]
+/Names [(io.storefile.bloom.block.size) 358 0 R (irbrc) 795 0 R (irc) 3369 0 R (isolate-system-tables) 3335 0 R (java) 119 0 R (java-2) 1918 0 R (java-3) 1923 0 R (java.client.config) 516 0 R (jdk-issues) 2899 0 R (jdk-version-requirements) 56 0 R]
 >>
 endobj
 159 0 obj
@@ -33172,7 +33172,7 @@ endobj
 /F4.0 35 0 R
 /F1.1 38 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [165 0 R 166 0 R 168 0 R]
@@ -33972,7 +33972,7 @@ endobj
 /F5.1 45 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [173 0 R 174 0 R 175 0 R 177 0 R 178 0 R 180 0 R 181 0 R]
@@ -35444,7 +35444,7 @@ endobj
 /F1.0 10 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [184 0 R 185 0 R]
@@ -35869,7 +35869,7 @@ endobj
 /F4.0 35 0 R
 /F3.0 33 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -36525,7 +36525,7 @@ endobj
 /F3.0 33 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [191 0 R]
@@ -37292,7 +37292,7 @@ endobj
 /F4.0 35 0 R
 /F1.1 38 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [196 0 R]
@@ -37823,7 +37823,7 @@ endobj
 /F3.0 33 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -37839,7 +37839,7 @@ endobj
 endobj
 206 0 obj
 << /Limits [(getshortmidpointkey-an-optimization-for-data-index-block) (handling-of-errors-during-log-splitting)]
-/Names [(getshortmidpointkey-an-optimization-for-data-index-block) 4048 0 R (getting.involved) 3357 0 R (getting_started) 48 0 R (git.best.practices) 3631 0 R (git.patch.flow) 3685 0 R (goals) 4142 0 R (guide-for-hbase-committers) 3659 0 R (guidelines-for-deploying-a-coprocessor) 2367 0 R (guidelines-for-reporting-effective-issues) 3375 0 R (hadoop) 140 0 R (hadoop.native.lib) 3966 0 R (hadoop.policy.file) 382 0 R (handling-of-errors-during-log-splitting) 1659 0 R]
+/Names [(getshortmidpointkey-an-optimization-for-data-index-block) 4049 0 R (getting.involved) 3358 0 R (getting_started) 48 0 R (git.best.practices) 3630 0 R (git.patch.flow) 3684 0 R (goals) 4143 0 R (guide-for-hbase-committers) 3658 0 R (guidelines-for-deploying-a-coprocessor) 2368 0 R (guidelines-for-reporting-effective-issues) 3376 0 R (hadoop) 140 0 R (hadoop.native.lib) 3965 0 R (hadoop.policy.file) 383 0 R (handling-of-errors-during-log-splitting) 1660 0 R]
 >>
 endobj
 207 0 obj
@@ -38369,7 +38369,7 @@ endobj
 /F3.0 33 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -38891,7 +38891,7 @@ endobj
 /F1.0 10 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -38900,8 +38900,8 @@ endobj
 [219 0 R /XYZ 0 669.5005 null]
 endobj
 221 0 obj
-<< /Limits [(hbase.master.logcleaner.ttl) (hbase.mob.compaction.mergeable.threshold)]
-/Names [(hbase.master.logcleaner.ttl) 213 0 R (hbase.master.mob.ttl.cleaner.period) 494 0 R (hbase.master.normalizer.class) 462 0 R (hbase.master.port) 207 0 R (hbase.master.procedurewalcleaner.ttl) 214 0 R (hbase.master.wait.on.service.seconds) 509 0 R (hbase.mob.cache.evict.period) 492 0 R (hbase.mob.cache.evict.remain.ratio) 493 0 R (hbase.mob.compaction.batch.size) 499 0 R (hbase.mob.compaction.chore.period) 500 0 R (hbase.mob.compaction.mergeable.threshold) 495 0 R]
+<< /Limits [(hbase.master.logcleaner.ttl) (hbase.mob.delfile.max.count)]
+/Names [(hbase.master.logcleaner.ttl) 213 0 R (hbase.master.mob.ttl.cleaner.period) 495 0 R (hbase.master.normalizer.class) 466 0 R (hbase.master.port) 207 0 R (hbase.master.procedurewalcleaner.ttl) 214 0 R (hbase.master.wait.on.service.seconds) 510 0 R (hbase.mob.cache.evict.period) 493 0 R (hbase.mob.cache.evict.remain.ratio) 494 0 R (hbase.mob.compaction.batch.size) 500 0 R (hbase.mob.compaction.chore.period) 502 0 R (hbase.mob.compaction.mergeable.threshold) 498 0 R (hbase.mob.compaction.threads.max) 504 0 R (hbase.mob.compactor.class) 503 0 R (hbase.mob.delfile.max.count) 499 0 R]
 >>
 endobj
 222 0 obj
@@ -39526,7 +39526,7 @@ endobj
 /F3.0 33 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -40035,7 +40035,7 @@ endobj
 /F3.0 33 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -40057,7 +40057,7 @@ endobj
 endobj
 239 0 obj
 << /Limits [(hbase.regionserver.kerberos.principal) (hbase.regionserver.thread.compaction.throttle)]
-/Names [(hbase.regionserver.kerberos.principal) 381 0 R (hbase.regionserver.keytab.file) 380 0 R (hbase.regionserver.logroll.errors.tolerated) 235 0 R (hbase.regionserver.logroll.period) 234 0 R (hbase.regionserver.majorcompaction.pagecache.drop) 336 0 R (hbase.regionserver.minorcompaction.pagecache.drop) 337 0 R (hbase.regionserver.msginterval) 231 0 R (hbase.regionserver.optionalcacheflushinterval) 244 0 R (hbase.regionserver.port) 220 0 R (hbase.regionserver.region.split.policy) 247 0 R (hbase.regionserver.regionSplitLimit) 248 0 R (hbase.regionserver.storefile.refresh.period) 478 0 R (hbase.regionserver.thread.compaction.throttle) 335 0 R]
+/Names [(hbase.regionserver.kerberos.principal) 382 0 R (hbase.regionserver.keytab.file) 381 0 R (hbase.regionserver.logroll.errors.tolerated) 235 0 R (hbase.regionserver.logroll.period) 234 0 R (hbase.regionserver.majorcompaction.pagecache.drop) 337 0 R (hbase.regionserver.minorcompaction.pagecache.drop) 338 0 R (hbase.regionserver.msginterval) 231 0 R (hbase.regionserver.optionalcacheflushinterval) 244 0 R (hbase.regionserver.port) 220 0 R (hbase.regionserver.region.split.policy) 247 0 R (hbase.regionserver.regionSplitLimit) 248 0 R (hbase.regionserver.storefile.refresh.period) 479 0 R (hbase.regionserver.thread.compaction.throttle) 336 0 R]
 >>
 endobj
 240 0 obj
@@ -40566,7 +40566,7 @@ endobj
 /F3.0 33 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -41145,7 +41145,7 @@ endobj
 /F3.0 33 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 /Annots [252 0 R 253 0 R]
@@ -41712,7 +41712,7 @@ endobj
 /F1.0 10 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [261 0 R 262 0 R 264 0 R 265 0 R]
@@ -41776,7 +41776,7 @@ endobj
 endobj
 268 0 obj
 << /Limits [(hbase.table.lock.enable) (hbase.tmp.dir)]
-/Names [(hbase.table.lock.enable) 407 0 R (hbase.table.max.rowsize) 408 0 R (hbase.tags) 1350 0 R (hbase.tests) 3509 0 R (hbase.tests.categories) 3554 0 R (hbase.tests.cluster) 3556 0 R (hbase.tests.example.code) 3557 0 R (hbase.tests.rules) 3550 0 R (hbase.tests.sleeps) 3555 0 R (hbase.tests.writing) 3549 0 R (hbase.thrift.maxQueuedRequests) 414 0 R (hbase.thrift.maxWorkerThreads) 413 0 R (hbase.thrift.minWorkerThreads) 412 0 R (hbase.tmp.dir) 198 0 R]
+/Names [(hbase.table.lock.enable) 408 0 R (hbase.table.max.rowsize) 411 0 R (hbase.tags) 1351 0 R (hbase.tests) 3510 0 R (hbase.tests.categories) 3554 0 R (hbase.tests.cluster) 3556 0 R (hbase.tests.example.code) 3557 0 R (hbase.tests.rules) 3550 0 R (hbase.tests.sleeps) 3555 0 R (hbase.tests.writing) 3549 0 R (hbase.thrift.maxQueuedRequests) 415 0 R (hbase.thrift.maxWorkerThreads) 414 0 R (hbase.thrift.minWorkerThreads) 413 0 R (hbase.tmp.dir) 198 0 R]
 >>
 endobj
 269 0 obj
@@ -42332,7 +42332,7 @@ endobj
 /F1.0 10 0 R
 /F1.1 38 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -42927,7 +42927,7 @@ endobj
 /F4.0 35 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 /Annots [285 0 R 286 0 R]
@@ -42947,7 +42947,7 @@ endobj
 endobj
 284 0 obj
 << /Limits [(hbase.cluster.distributed) (hbase.data.umask.enable)]
-/Names [(hbase.cluster.distributed) 200 0 R (hbase.column.max.version) 433 0 R (hbase.commit.msg.format) 3811 0 R (hbase.coordinated.state.manager.class) 476 0 R (hbase.coprocessor.abortonerror) 398 0 R (hbase.coprocessor.enabled) 391 0 R (hbase.coprocessor.master.classes) 397 0 R (hbase.coprocessor.region.classes) 396 0 R (hbase.coprocessor.user.enabled) 393 0 R (hbase.data.umask) 424 0 R (hbase.data.umask.enable) 423 0 R]
+/Names [(hbase.cluster.distributed) 200 0 R (hbase.column.max.version) 434 0 R (hbase.commit.msg.format) 3810 0 R (hbase.coordinated.state.manager.class) 477 0 R (hbase.coprocessor.abortonerror) 399 0 R (hbase.coprocessor.enabled) 392 0 R (hbase.coprocessor.master.classes) 398 0 R (hbase.coprocessor.region.classes) 397 0 R (hbase.coprocessor.user.enabled) 396 0 R (hbase.data.umask) 425 0 R (hbase.data.umask.enable) 424 0 R]
 >>
 endobj
 285 0 obj
@@ -43493,7 +43493,7 @@ endobj
 /F3.0 33 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
@@ -43517,7 +43517,7 @@ endobj
 [289 0 R /XYZ 0 116.81 null]
 endobj
 296 0 obj
-<< /Length 7177
+<< /Length 6859
 >>
 stream
 q
@@ -43672,7 +43672,7 @@ ET
 BT
 48.24 516.565 Td
 /F4.0 10.5 Tf
-<68626173652e726567696f6e732e736c6f70> Tj
+<68626173652e6e6f726d616c697a65722e6d696e2e726567696f6e2e636f756e74> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43691,40 +43691,10 @@ ET
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
-1.1055 Tw
-
 BT
 63.24 480.39 Td
 /F1.0 10.5 Tf
-[<526562616c616e636520696620616e> 20.0195 <7920726567696f6e736572766572206861732061766572> 20.0195 <616765202b202861766572> 20.0195 <616765202a20736c6f702920726567696f6e732e205468652064656661756c742076616c7565206f662074686973>] TJ
-ET
-
-
-0.0 Tw
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-0.2 0.2 0.2 scn
-0.2 0.2 0.2 SCN
-
-0.2441 Tw
-
-BT
-63.24 464.61 Td
-/F1.0 10.5 Tf
-[<706172> 20.0195 <616d6574657220697320302e30303120696e2053746f636861737469634c6f616442616c616e63657220287468652064656661756c74206c6f61642062616c616e636572292c207768696c65207468652064656661756c7420697320302e32>] TJ
-ET
-
-
-0.0 Tw
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-0.2 0.2 0.2 scn
-0.2 0.2 0.2 SCN
-
-BT
-63.24 448.83 Td
-/F1.0 10.5 Tf
-<696e206f74686572206c6f61642062616c616e636572732028692e652e2c2053696d706c654c6f616442616c616e636572292e> Tj
+<636f6e66696775726520746865206d696e696d756d206e756d626572206f6620726567696f6e73> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43733,7 +43703,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 421.6482 Td
+63.24 453.2082 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -43744,9 +43714,9 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 404.243 Td
+63.24 435.803 Td
 /F4.0 10.5 Tf
-<302e303031> Tj
+<33> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43755,9 +43725,9 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 380.243 Td
+48.24 411.803 Td
 /F4.0 10.5 Tf
-<68626173652e7365727665722e7468726561642e77616b656672657175656e6379> Tj
+<68626173652e726567696f6e732e736c6f70> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43766,7 +43736,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 363.6572 Td
+63.24 395.2172 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -43776,12 +43746,12 @@ ET
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
-0.9598 Tw
+1.1055 Tw
 
 BT
-63.24 344.068 Td
+63.24 375.628 Td
 /F1.0 10.5 Tf
-[<54696d6520746f20736c65657020696e206265747765656e20736561726368657320666f7220776f726b2028696e206d696c6c697365636f6e6473292e205573656420617320736c65657020696e74657276616c2062> 20.0195 <792073657276696365>] TJ
+[<526562616c616e636520696620616e> 20.0195 <7920726567696f6e736572766572206861732061766572> 20.0195 <616765202b202861766572> 20.0195 <616765202a20736c6f702920726567696f6e732e205468652064656661756c742076616c7565206f662074686973>] TJ
 ET
 
 
@@ -43791,19 +43761,34 @@ ET
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
+0.2441 Tw
+
 BT
-63.24 328.288 Td
+63.24 359.848 Td
 /F1.0 10.5 Tf
-<746872656164732073756368206173206c6f6720726f6c6c65722e> Tj
+[<706172> 20.0195 <616d6574657220697320302e30303120696e2053746f636861737469634c6f616442616c616e63657220287468652064656661756c74206c6f61642062616c616e636572292c207768696c65207468652064656661756c7420697320302e32>] TJ
 ET
 
+
+0.0 Tw
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
 BT
-63.24 301.1062 Td
+63.24 344.068 Td
+/F1.0 10.5 Tf
+<696e206f74686572206c6f61642062616c616e636572732028692e652e2c2053696d706c654c6f616442616c616e636572292e> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+63.24 316.8862 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -43814,9 +43799,9 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 283.701 Td
+63.24 299.481 Td
 /F4.0 10.5 Tf
-<3130303030> Tj
+<302e303031> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43825,9 +43810,9 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 259.701 Td
+48.24 275.481 Td
 /F4.0 10.5 Tf
-<68626173652e7365727665722e76657273696f6e66696c652e7772697465617474656d707473> Tj
+<68626173652e7365727665722e7468726561642e77616b656672657175656e6379> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43836,7 +43821,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 243.1152 Td
+63.24 258.8952 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -43846,12 +43831,12 @@ ET
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
-0.5114 Tw
+0.9598 Tw
 
 BT
-63.24 223.526 Td
+63.24 239.306 Td
 /F1.0 10.5 Tf
-[<486f77206d616e> 20.0195 <792074696d657320746f20726574727920617474656d7074696e6720746f20777269746520612076657273696f6e2066696c65206265666f7265206a7573742061626f7274696e672e204561636820617474656d7074206973>] TJ
+[<54696d6520746f20736c65657020696e206265747765656e20736561726368657320666f7220776f726b2028696e206d696c6c697365636f6e6473292e205573656420617320736c65657020696e74657276616c2062> 20.0195 <792073657276696365>] TJ
 ET
 
 
@@ -43862,9 +43847,9 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 207.746 Td
+63.24 223.526 Td
 /F1.0 10.5 Tf
-[<7365706172> 20.0195 <617465642062> 20.0195 <79207468652068626173652e7365727665722e7468726561642e77616b> 20.0195 <656672657175656e6379206d696c6c697365636f6e64732e>] TJ
+<746872656164732073756368206173206c6f6720726f6c6c65722e> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43873,7 +43858,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 180.5642 Td
+63.24 196.3442 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -43884,9 +43869,9 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 163.159 Td
+63.24 178.939 Td
 /F4.0 10.5 Tf
-<33> Tj
+<3130303030> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43895,9 +43880,9 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 139.159 Td
+48.24 154.939 Td
 /F4.0 10.5 Tf
-<68626173652e68726567696f6e2e6d656d73746f72652e666c7573682e73697a65> Tj
+<68626173652e7365727665722e76657273696f6e66696c652e7772697465617474656d707473> Tj
 ET
 
 0.0 0.0 0.0 SCN
@@ -43906,7 +43891,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 122.5732 Td
+63.24 138.3532 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -43916,12 +43901,12 @@ ET
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
-0.771 Tw
+0.5114 Tw
 
 BT
-63.24 102.984 Td
+63.24 118.764 Td
 /F1.0 10.5 Tf
-[<4d656d73746f72652077696c6c20626520666c757368656420746f206469736b2069662073697a65206f6620746865206d656d73746f726520657863656564732074686973206e756d626572206f662062> 20.0195 <797465732e2056> 60.0586 <616c7565206973>] TJ
+[<486f77206d616e> 20.0195 <792074696d657320746f20726574727920617474656d7074696e6720746f20777269746520612076657273696f6e2066696c65206265666f7265206a7573742061626f7274696e672e204561636820617474656d7074206973>] TJ
 ET
 
 
@@ -43932,9 +43917,9 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 87.204 Td
+63.24 102.984 Td
 /F1.0 10.5 Tf
-[<636865636b> 20.0195 <65642062> 20.0195 <7920612074687265616420746861742072756e732065766572792068626173652e7365727665722e7468726561642e77616b> 20.0195 <656672657175656e6379> 89.8438 <2e>] TJ
+[<7365706172> 20.0195 <617465642062> 20.0195 <79207468652068626173652e7365727665722e7468726561642e77616b> 20.0195 <656672657175656e6379206d696c6c697365636f6e64732e>] TJ
 ET
 
 0.0 0.0 0.0 SCN
@@ -43943,13 +43928,24 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 60.0222 Td
+63.24 75.8022 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
 
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
+0.6941 0.1294 0.2745 scn
+0.6941 0.1294 0.2745 SCN
+
+BT
+63.24 58.397 Td
+/F4.0 10.5 Tf
+<33> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
 q
 0.0 0.0 0.0 scn
 0.0 0.0 0.0 SCN
@@ -43988,7 +43984,7 @@ endobj
 /F3.0 33 0 R
 /F4.0 35 0 R
 >>
-/XObject << /Stamp1 4613 0 R
+/XObject << /Stamp1 4614 0 R
 >>
 >>
 >>
@@ -44003,16 +43999,16 @@ endobj
 [297 0 R /XYZ 0 526.345 null]
 endobj
 301 0 obj
-[297 0 R /XYZ 0 390.023 null]
+[297 0 R /XYZ 0 421.583 null]
 endobj
 302 0 obj
-[297 0 R /XYZ 0 269.481 null]
+[297 0 R /XYZ 0 285.261 null]
 endobj
 303 0 obj
-[297 0 R /XYZ 0 148.939 null]
+[297 0 R /XYZ 0 164.719 null]
 endobj
 304 0 obj
-<< /Length 9338
+<< /Length 9496
 >>
 stream
 q
@@ -44022,7 +44018,66 @@ q
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 796.86 Td
+48.24 796.11 Td
+/F4.0 10.5 Tf
+<68626173652e68726567696f6e2e6d656d73746f72652e666c7573682e73697a65> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+63.24 779.5242 Td
+/F3.0 9.975 Tf
+<4465736372697074696f6e> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+0.771 Tw
+
+BT
+63.24 759.935 Td
+/F1.0 10.5 Tf
+[<4d656d73746f72652077696c6c20626520666c757368656420746f206469736b2069662073697a65206f6620746865206d656d73746f726520657863656564732074686973206e756d626572206f662062> 20.0195 <797465732e2056> 60.0586 <616c7565206973>] TJ
+ET
+
+
+0.0 Tw
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+63.24 744.155 Td
+/F1.0 10.5 Tf
+[<636865636b> 20.0195 <65642062> 20.0195 <7920612074687265616420746861742072756e732065766572792068626173652e7365727665722e7468726561642e77616b> 20.0195 <656672657175656e6379> 89.8438 <2e>] TJ
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+63.24 716.9732 Td
+/F3.0 9.975 Tf
+<44656661756c74> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.6941 0.1294 0.2745 scn
+0.6941 0.1294 0.2745 SCN
+
+BT
+63.24 699.568 Td
 /F4.0 10.5 Tf
 <313334323137373238> Tj
 ET
@@ -44033,7 +44088,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 772.86 Td
+48.24 675.568 Td
 /F4.0 10.5 Tf
 <68626173652e68726567696f6e2e706572636f6c756d6e66616d696c79666c7573682e73697a652e6c6f7765722e626f756e642e6d696e> Tj
 ET
@@ -44044,7 +44099,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 756.2742 Td
+63.24 658.9822 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44057,7 +44112,7 @@ ET
 1.0784 Tw
 
 BT
-63.24 736.685 Td
+63.24 639.393 Td
 /F1.0 10.5 Tf
 <496620466c7573684c6172676553746f726573506f6c696379206973207573656420616e6420746865726520617265206d756c7469706c6520636f6c756d6e2066616d696c6965732c207468656e2065766572792074696d652074686174> Tj
 ET
@@ -44072,7 +44127,7 @@ ET
 0.7003 Tw
 
 BT
-63.24 720.905 Td
+63.24 623.613 Td
 /F1.0 10.5 Tf
 <7765206869742074686520746f74616c206d656d73746f7265206c696d69742c2077652066696e64206f757420616c6c2074686520636f6c756d6e2066616d696c6965732077686f7365206d656d73746f726573206578636565642061> Tj
 ET
@@ -44087,7 +44142,7 @@ ET
 1.3808 Tw
 
 BT
-63.24 705.125 Td
+63.24 607.833 Td
 /F1.0 10.5 Tf
 [<226c6f77657220626f756e642220616e64206f6e6c7920666c757368207468656d207768696c652072657461696e696e6720746865206f746865727320696e206d656d6f7279> 89.8438 <2e2054686520226c6f77657220626f756e6422>] TJ
 ET
@@ -44102,7 +44157,7 @@ ET
 0.7049 Tw
 
 BT
-63.24 689.345 Td
+63.24 592.053 Td
 /F1.0 10.5 Tf
 [<77696c6c206265202268626173652e68726567696f6e2e6d656d73746f72652e666c7573682e73697a65202f20636f6c756d6e5f66616d696c795f6e756d626572222062> 20.0195 <792064656661756c7420756e6c6573732076616c7565206f66>] TJ
 ET
@@ -44117,7 +44172,7 @@ ET
 1.7064 Tw
 
 BT
-63.24 673.565 Td
+63.24 576.273 Td
 /F1.0 10.5 Tf
 <746869732070726f7065727479206973206c6172676572207468616e20746861742e204966206e6f6e65206f66207468652066616d696c6965732068617665207468656972206d656d73746f72652073697a65206d6f7265207468616e> Tj
 ET
@@ -44130,7 +44185,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 657.785 Td
+63.24 560.493 Td
 /F1.0 10.5 Tf
 <6c6f77657220626f756e642c20616c6c20746865206d656d73746f7265732077696c6c20626520666c757368656420286a75737420617320757375616c292e> Tj
 ET
@@ -44141,7 +44196,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 630.6032 Td
+63.24 533.3112 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44152,7 +44207,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 613.198 Td
+63.24 515.906 Td
 /F4.0 10.5 Tf
 <3136373737323136> Tj
 ET
@@ -44163,7 +44218,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 589.198 Td
+48.24 491.906 Td
 /F4.0 10.5 Tf
 <68626173652e68726567696f6e2e707265636c6f73652e666c7573682e73697a65> Tj
 ET
@@ -44174,7 +44229,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 572.6122 Td
+63.24 475.3202 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44187,7 +44242,7 @@ ET
 0.1308 Tw
 
 BT
-63.24 553.023 Td
+63.24 455.731 Td
 /F1.0 10.5 Tf
 [<496620746865206d656d73746f72657320696e206120726567696f6e2061726520746869732073697a65206f72206c6172676572207768656e20776520676f20746f20636c6f73652c2072756e206120227072652d666c757368> 40.0391 <2220746f20636c656172>] TJ
 ET
@@ -44202,7 +44257,7 @@ ET
 1.0921 Tw
 
 BT
-63.24 537.243 Td
+63.24 439.951 Td
 /F1.0 10.5 Tf
 [<6f7574206d656d73746f726573206265666f7265207765207075742075702074686520726567696f6e20636c6f73656420666c616720616e642074616b> 20.0195 <652074686520726567696f6e206f66666c696e652e204f6e20636c6f73652c2061>] TJ
 ET
@@ -44217,7 +44272,7 @@ ET
 0.4929 Tw
 
 BT
-63.24 521.463 Td
+63.24 424.171 Td
 /F1.0 10.5 Tf
 [<666c7573682069732072756e20756e6465722074686520636c6f736520666c616720746f20656d707479206d656d6f7279> 89.8438 <2e20447572696e6720746869732074696d652074686520726567696f6e206973206f66666c696e6520616e64207765>] TJ
 ET
@@ -44232,7 +44287,7 @@ ET
 0.109 Tw
 
 BT
-63.24 505.683 Td
+63.24 408.391 Td
 /F1.0 10.5 Tf
 [<617265206e6f742074616b696e67206f6e20616e> 20.0195 <79207772697465732e20496620746865206d656d73746f726520636f6e74656e74206973206c617267652c207468697320666c75736820636f756c642074616b> 20.0195 <652061206c6f6e672074696d6520746f>] TJ
 ET
@@ -44247,7 +44302,7 @@ ET
 1.5902 Tw
 
 BT
-63.24 489.903 Td
+63.24 392.611 Td
 /F1.0 10.5 Tf
 <636f6d706c6574652e2054686520707265666c757368206973206d65616e7420746f20636c65616e206f7574207468652062756c6b206f6620746865206d656d73746f7265206265666f72652070757474696e6720757020746865> Tj
 ET
@@ -44260,7 +44315,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 474.123 Td
+63.24 376.831 Td
 /F1.0 10.5 Tf
 <636c6f736520666c616720616e642074616b696e672074686520726567696f6e206f66666c696e6520736f2074686520666c75736820746861742072756e7320756e6465722074686520636c6f736520666c616720686173206c6974746c6520746f20646f2e> Tj
 ET
@@ -44271,7 +44326,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 446.9412 Td
+63.24 349.6492 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44282,7 +44337,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 429.536 Td
+63.24 332.244 Td
 /F4.0 10.5 Tf
 <35323432383830> Tj
 ET
@@ -44293,7 +44348,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 405.536 Td
+48.24 308.244 Td
 /F4.0 10.5 Tf
 <68626173652e68726567696f6e2e6d656d73746f72652e626c6f636b2e6d756c7469706c696572> Tj
 ET
@@ -44304,7 +44359,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 388.9502 Td
+63.24 291.6582 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44317,7 +44372,7 @@ ET
 15.2166 Tw
 
 BT
-63.24 369.361 Td
+63.24 272.069 Td
 /F1.0 10.5 Tf
 <426c6f636b2075706461746573206966206d656d73746f7265206861732068626173652e68726567696f6e2e6d656d73746f72652e626c6f636b2e6d756c7469706c6965722074696d6573> Tj
 ET
@@ -44332,7 +44387,7 @@ ET
 0.3547 Tw
 
 BT
-63.24 353.581 Td
+63.24 256.289 Td
 /F1.0 10.5 Tf
 [<68626173652e68726567696f6e2e6d656d73746f72652e666c7573682e73697a652062> 20.0195 <797465732e2055736566756c2070726576656e74696e672072756e617761> 20.0195 <79206d656d73746f726520647572696e67207370696b> 20.0195 <657320696e>] TJ
 ET
@@ -44347,7 +44402,7 @@ ET
 1.1027 Tw
 
 BT
-63.24 337.801 Td
+63.24 240.509 Td
 /F1.0 10.5 Tf
 [<757064617465207472> 20.0195 <61666669632e20576974686f757420616e2075707065722d626f756e642c206d656d73746f72652066696c6c7320737563682074686174207768656e20697420666c75736865732074686520726573756c74616e74>] TJ
 ET
@@ -44360,7 +44415,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 322.021 Td
+63.24 224.729 Td
 /F1.0 10.5 Tf
 [<666c7573682066696c65732074616b> 20.0195 <652061206c6f6e672074696d6520746f20636f6d70616374206f722073706c69742c206f7220776f7273652c207765204f4f4d452e>] TJ
 ET
@@ -44371,7 +44426,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 294.8392 Td
+63.24 197.5472 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44382,7 +44437,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 277.434 Td
+63.24 180.142 Td
 /F4.0 10.5 Tf
 <34> Tj
 ET
@@ -44393,7 +44448,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 253.434 Td
+48.24 156.142 Td
 /F4.0 10.5 Tf
 <68626173652e68726567696f6e2e6d656d73746f72652e6d736c61622e656e61626c6564> Tj
 ET
@@ -44404,7 +44459,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 236.8482 Td
+63.24 139.5562 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44417,7 +44472,7 @@ ET
 4.9275 Tw
 
 BT
-63.24 217.259 Td
+63.24 119.967 Td
 /F1.0 10.5 Tf
 <456e61626c657320746865204d656d53746f72652d4c6f63616c20416c6c6f636174696f6e204275666665722c2061206665617475726520776869636820776f726b7320746f2070726576656e742068656170> Tj
 ET
@@ -44432,7 +44487,7 @@ ET
 2.1282 Tw
 
 BT
-63.24 201.479 Td
+63.24 104.187 Td
 /F1.0 10.5 Tf
 [<6672> 20.0195 <61676d656e746174696f6e20756e646572206865617679207772697465206c6f6164732e20546869732063616e2072656475636520746865206672657175656e6379206f662073746f702d7468652d776f726c64204743>] TJ
 ET
@@ -44445,7 +44500,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 185.699 Td
+63.24 88.407 Td
 /F1.0 10.5 Tf
 <706175736573206f6e206c617267652068656170732e> Tj
 ET
@@ -44456,72 +44511,13 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 158.5172 Td
+63.24 61.2252 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
 
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
-0.6941 0.1294 0.2745 scn
-0.6941 0.1294 0.2745 SCN
-
-BT
-63.24 141.112 Td
-/F4.0 10.5 Tf
-<74727565> Tj
-ET
-
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-0.6941 0.1294 0.2745 scn
-0.6941 0.1294 0.2745 SCN
-
-BT
-48.24 117.112 Td
-/F4.0 10.5 Tf
-<68626173652e68726567696f6e2e6d61782e66696c6573697a65> Tj
-ET
-
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-0.2 0.2 0.2 scn
-0.2 0.2 0.2 SCN
-
-BT
-63.24 100.5262 Td
-/F3.0 9.975 Tf
-<4465736372697074696f6e> Tj
-ET
-
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-0.2 0.2 0.2 scn
-0.2 0.2 0.2 SCN
-
-0.5794 Tw
-
-BT
-63.24 80.937 Td
-/F1.0 10.5 Tf
-<4d6178696d756d204846696c652073697a652e204966207468652073756d206f66207468652073697a6573206f66206120726567696f6ed573204846696c6573206861732067726f776e20746f2065786365656420746869732076616c75652c> Tj
-ET
-
-
-0.0 Tw
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-0.2 0.2 0.2 scn
-0.2 0.2 0.2 SCN
-
-BT
-63.24 65.157 Td
-/F1.0 10.5 Tf
-<74686520726567696f6e2069732073706c697420696e2074776f2e> Tj
-ET
-
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
 q
 0.0 0.0 0.0 scn
 0.0 0.0 0.0 SCN
@@ -44560,38 +44556,97 @@ endobj
 /F3.0 33 0 R
 /F1.0 10 0 R
 >>
-/XObject << /Stamp2 4614 0 R
+/XObject << /Stamp2 4615 0 R
 >>
 >>
 >>
 endobj
 306 0 obj
-[305 0 R /XYZ 0 782.64 null]
+[305 0 R /XYZ 0 805.89 null]
 endobj
 307 0 obj
-[305 0 R /XYZ 0 598.978 null]
+[305 0 R /XYZ 0 685.348 null]
 endobj
 308 0 obj
-[305 0 R /XYZ 0 415.316 null]
+[305 0 R /XYZ 0 501.686 null]
 endobj
 309 0 obj
-[305 0 R /XYZ 0 263.214 null]
+[305 0 R /XYZ 0 318.024 null]
 endobj
 310 0 obj
-[305 0 R /XYZ 0 126.892 null]
+[305 0 R /XYZ 0 165.922 null]
 endobj
 311 0 obj
-<< /Length 9084
+<< /Length 8783
 >>
 stream
 q
 /DeviceRGB cs
-0.2 0.2 0.2 scn
+0.6941 0.1294 0.2745 scn
 /DeviceRGB CS
+0.6941 0.1294 0.2745 SCN
+
+BT
+63.24 796.86 Td
+/F4.0 10.5 Tf
+<74727565> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.6941 0.1294 0.2745 scn
+0.6941 0.1294 0.2745 SCN
+
+BT
+48.24 772.86 Td
+/F4.0 10.5 Tf
+<68626173652e68726567696f6e2e6d61782e66696c6573697a65> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
 BT
-63.24 795.2367 Td
+63.24 756.2742 Td
+/F3.0 9.975 Tf
+<4465736372697074696f6e> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+0.5794 Tw
+
+BT
+63.24 736.685 Td
+/F1.0 10.5 Tf
+<4d6178696d756d204846696c652073697a652e204966207468652073756d206f66207468652073697a6573206f66206120726567696f6ed573204846696c6573206861732067726f776e20746f2065786365656420746869732076616c75652c> Tj
+ET
+
+
+0.0 Tw
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+63.24 720.905 Td
+/F1.0 10.5 Tf
+<74686520726567696f6e2069732073706c697420696e2074776f2e> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+63.24 693.7232 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44602,7 +44657,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 777.8315 Td
+63.24 676.318 Td
 /F4.0 10.5 Tf
 <3130373337343138323430> Tj
 ET
@@ -44613,7 +44668,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 753.8315 Td
+48.24 652.318 Td
 /F4.0 10.5 Tf
 <68626173652e68726567696f6e2e6d616a6f72636f6d70616374696f6e> Tj
 ET
@@ -44624,7 +44679,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 737.2457 Td
+63.24 635.7322 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44637,7 +44692,7 @@ ET
 2.5455 Tw
 
 BT
-63.24 717.6565 Td
+63.24 616.143 Td
 /F1.0 10.5 Tf
 <54696d65206265747765656e206d616a6f7220636f6d70616374696f6e732c2065787072657373656420696e206d696c6c697365636f6e64732e2053657420746f203020746f2064697361626c652074696d652d6261736564> Tj
 ET
@@ -44652,7 +44707,7 @@ ET
 1.2644 Tw
 
 BT
-63.24 701.8765 Td
+63.24 600.363 Td
 /F1.0 10.5 Tf
 <6175746f6d61746963206d616a6f7220636f6d70616374696f6e732e20557365722d72657175657374656420616e642073697a652d6261736564206d616a6f7220636f6d70616374696f6e732077696c6c207374696c6c2072756e2e> Tj
 ET
@@ -44667,7 +44722,7 @@ ET
 0.4679 Tw
 
 BT
-63.24 686.0965 Td
+63.24 584.583 Td
 /F1.0 10.5 Tf
 [<546869732076616c7565206973206d756c7469706c6965642062> 20.0195 <792068626173652e68726567696f6e2e6d616a6f72636f6d70616374696f6e2e6a697474657220746f20636175736520636f6d70616374696f6e20746f207374617274206174>] TJ
 ET
@@ -44682,7 +44737,7 @@ ET
 0.374 Tw
 
 BT
-63.24 670.3165 Td
+63.24 568.803 Td
 /F1.0 10.5 Tf
 [<6120736f6d65776861742d72> 20.0195 <616e646f6d2074696d6520647572696e67206120676976656e2077696e646f77206f662074696d652e205468652064656661756c742076616c75652069732037206461> 20.0195 <79732c20657870726573736564>] TJ
 ET
@@ -44697,7 +44752,7 @@ ET
 2.4423 Tw
 
 BT
-63.24 654.5365 Td
+63.24 553.023 Td
 /F1.0 10.5 Tf
 <696e206d696c6c697365636f6e64732e204966206d616a6f7220636f6d70616374696f6e73206172652063617573696e672064697372757074696f6e20696e20796f757220656e7669726f6e6d656e742c20796f752063616e> Tj
 ET
@@ -44712,7 +44767,7 @@ ET
 2.7722 Tw
 
 BT
-63.24 638.7565 Td
+63.24 537.243 Td
 /F1.0 10.5 Tf
 [<636f6e666967757265207468656d20746f2072756e206174206f66662d7065616b2074696d657320666f7220796f7572206465706c6f> 20.0195 <796d656e742c206f722064697361626c652074696d652d6261736564206d616a6f72>] TJ
 ET
@@ -44727,7 +44782,7 @@ ET
 2.0923 Tw
 
 BT
-63.24 622.9765 Td
+63.24 521.463 Td
 /F1.0 10.5 Tf
 [<636f6d70616374696f6e732062> 20.0195 <792073657474696e67207468697320706172> 20.0195 <616d6574657220746f20302c20616e642072756e206d616a6f7220636f6d70616374696f6e7320696e20612063726f6e206a6f62206f722062> 20.0195 <79>] TJ
 ET
@@ -44740,7 +44795,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 607.1965 Td
+63.24 505.683 Td
 /F1.0 10.5 Tf
 <616e6f746865722065787465726e616c206d656368616e69736d2e> Tj
 ET
@@ -44751,7 +44806,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 580.0147 Td
+63.24 478.5012 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44762,7 +44817,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 562.6095 Td
+63.24 461.096 Td
 /F4.0 10.5 Tf
 <363034383030303030> Tj
 ET
@@ -44773,7 +44828,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 538.6095 Td
+48.24 437.096 Td
 /F4.0 10.5 Tf
 <68626173652e68726567696f6e2e6d616a6f72636f6d70616374696f6e2e6a6974746572> Tj
 ET
@@ -44784,7 +44839,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 522.0237 Td
+63.24 420.5102 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44797,7 +44852,7 @@ ET
 1.7489 Tw
 
 BT
-63.24 502.4345 Td
+63.24 400.921 Td
 /F1.0 10.5 Tf
 <41206d756c7469706c696572206170706c69656420746f2068626173652e68726567696f6e2e6d616a6f72636f6d70616374696f6e20746f20636175736520636f6d70616374696f6e20746f206f63637572206120676976656e> Tj
 ET
@@ -44812,7 +44867,7 @@ ET
 2.4047 Tw
 
 BT
-63.24 486.6545 Td
+63.24 385.141 Td
 /F1.0 10.5 Tf
 <616d6f756e74206f662074696d65206569746865722073696465206f662068626173652e68726567696f6e2e6d616a6f72636f6d70616374696f6e2e2054686520736d616c6c657220746865206e756d6265722c20746865> Tj
 ET
@@ -44825,7 +44880,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 470.8745 Td
+63.24 369.361 Td
 /F1.0 10.5 Tf
 <636c6f7365722074686520636f6d70616374696f6e732077696c6c2068617070656e20746f207468652068626173652e68726567696f6e2e6d616a6f72636f6d70616374696f6e20696e74657276616c2e> Tj
 ET
@@ -44836,7 +44891,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 443.6927 Td
+63.24 342.1792 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44847,7 +44902,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 426.2875 Td
+63.24 324.774 Td
 /F4.0 10.5 Tf
 <302e3530> Tj
 ET
@@ -44858,7 +44913,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 402.2875 Td
+48.24 300.774 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e5468726573686f6c64> Tj
 ET
@@ -44869,7 +44924,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 385.7017 Td
+63.24 284.1882 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44882,7 +44937,7 @@ ET
 0.7104 Tw
 
 BT
-63.24 366.1125 Td
+63.24 264.599 Td
 /F1.0 10.5 Tf
 [<4966206d6f7265207468616e2074686973206e756d626572206f662053746f726546696c657320657869737420696e20616e> 20.0195 <79206f6e652053746f726520286f6e652053746f726546696c65206973207772697474656e2070657220666c757368>] TJ
 ET
@@ -44897,7 +44952,7 @@ ET
 0.5307 Tw
 
 BT
-63.24 350.3325 Td
+63.24 248.819 Td
 /F1.0 10.5 Tf
 <6f66204d656d53746f7265292c206120636f6d70616374696f6e2069732072756e20746f207265777269746520616c6c2053746f726546696c657320696e746f20612073696e676c652053746f726546696c652e204c61726765722076616c756573> Tj
 ET
@@ -44910,7 +44965,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 334.5525 Td
+63.24 233.039 Td
 /F1.0 10.5 Tf
 [<64656c61> 20.0195 <7920636f6d70616374696f6e2c20627574207768656e20636f6d70616374696f6e20646f6573206f636375722c2069742074616b> 20.0195 <6573206c6f6e67657220746f20636f6d706c6574652e>] TJ
 ET
@@ -44921,7 +44976,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 307.3707 Td
+63.24 205.8572 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -44932,7 +44987,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 289.9655 Td
+63.24 188.452 Td
 /F4.0 10.5 Tf
 <33> Tj
 ET
@@ -44943,7 +44998,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 265.9655 Td
+48.24 164.452 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e666c75736865722e636f756e74> Tj
 ET
@@ -44954,7 +45009,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 249.3797 Td
+63.24 147.8662 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -44967,7 +45022,7 @@ ET
 1.2849 Tw
 
 BT
-63.24 229.7905 Td
+63.24 128.277 Td
 /F1.0 10.5 Tf
 <546865206e756d626572206f6620666c75736820746872656164732e205769746820666577657220746872656164732c20746865204d656d53746f726520666c75736865732077696c6c206265207175657565642e2057697468> Tj
 ET
@@ -44982,7 +45037,7 @@ ET
 3.4187 Tw
 
 BT
-63.24 214.0105 Td
+63.24 112.497 Td
 /F1.0 10.5 Tf
 [<6d6f726520746872656164732c2074686520666c75736865732077696c6c20626520657865637574656420696e20706172> 20.0195 <616c6c656c2c20696e6372656173696e6720746865206c6f6164206f6e20484446532c20616e64>] TJ
 ET
@@ -44995,7 +45050,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 198.2305 Td
+63.24 96.717 Td
 /F1.0 10.5 Tf
 <706f74656e7469616c6c792063617573696e67206d6f726520636f6d70616374696f6e732e> Tj
 ET
@@ -45006,7 +45061,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 171.0487 Td
+63.24 69.5352 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45017,18 +45072,88 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 153.6435 Td
+63.24 52.13 Td
 /F4.0 10.5 Tf
 <32> Tj
 ET
 
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
+q
+0.0 0.0 0.0 scn
+0.0 0.0 0.0 SCN
+1 w
+0 J
+0 j
+[] 0 d
+/Stamp1 Do
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+535.978 14.388 Td
+/F1.0 9 Tf
+<3435> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+Q
+Q
+
+endstream
+endobj
+312 0 obj
+<< /Type /Page
+/Parent 3 0 R
+/MediaBox [0 0 595.28 841.89]
+/CropBox [0 0 595.28 841.89]
+/BleedBox [0 0 595.28 841.89]
+/TrimBox [0 0 595.28 841.89]
+/ArtBox [0 0 595.28 841.89]
+/Contents 311 0 R
+/Resources << /ProcSet [/PDF /Text /ImageB /ImageC /ImageI]
+/Font << /F4.0 35 0 R
+/F3.0 33 0 R
+/F1.0 10 0 R
+>>
+/XObject << /Stamp1 4614 0 R
+>>
+>>
+>>
+endobj
+313 0 obj
+[312 0 R /XYZ 0 782.64 null]
+endobj
+314 0 obj
+[312 0 R /XYZ 0 662.098 null]
+endobj
+315 0 obj
+[312 0 R /XYZ 0 446.876 null]
+endobj
+316 0 obj
+[312 0 R /XYZ 0 310.554 null]
+endobj
+317 0 obj
+<< /Limits [(hbase.hstore.flusher.count) (hbase.lease.recovery.timeout)]
+/Names [(hbase.hstore.flusher.count) 318 0 R (hbase.hstore.time.to.purge.deletes) 331 0 R (hbase.http.filter.initializers) 483 0 R (hbase.http.max.threads) 485 0 R (hbase.http.staticuser.user) 490 0 R (hbase.ipc.client.fallback-to-simple-auth-allowed) 389 0 R (hbase.ipc.client.tcpnodelay) 369 0 R (hbase.ipc.server.callqueue.handler.factor) 226 0 R (hbase.ipc.server.callqueue.read.ratio) 229 0 R (hbase.ipc.server.callqueue.scan.ratio) 230 0 R (hbase.ipc.server.fallback-to-simple-auth-allowed) 390 0 R (hbase.lease.recovery.dfs.timeout) 433 0 R (hbase.lease.recovery.timeout) 432 0 R]
+>>
+endobj
+318 0 obj
+[312 0 R /XYZ 0 174.232 null]
+endobj
+319 0 obj
+<< /Length 9764
+>>
+stream
+q
+/DeviceRGB cs
 0.6941 0.1294 0.2745 scn
+/DeviceRGB CS
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 129.6435 Td
+48.24 796.11 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e626c6f636b696e6753746f726546696c6573> Tj
 ET
@@ -45039,7 +45164,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 113.0577 Td
+63.24 779.5242 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45052,7 +45177,7 @@ ET
 0.7104 Tw
 
 BT
-63.24 93.4685 Td
+63.24 759.935 Td
 /F1.0 10.5 Tf
 [<4966206d6f7265207468616e2074686973206e756d626572206f662053746f726546696c657320657869737420696e20616e> 20.0195 <79206f6e652053746f726520286f6e652053746f726546696c65206973207772697474656e2070657220666c757368>] TJ
 ET
@@ -45067,7 +45192,7 @@ ET
 2.2727 Tw
 
 BT
-63.24 77.6885 Td
+63.24 744.155 Td
 /F1.0 10.5 Tf
 [<6f66204d656d53746f7265292c20757064617465732061726520626c6f636b> 20.0195 <656420666f72207468697320726567696f6e20756e74696c206120636f6d70616374696f6e20697320636f6d706c657465642c206f7220756e74696c>] TJ
 ET
@@ -45080,88 +45205,18 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 61.9085 Td
+63.24 728.375 Td
 /F1.0 10.5 Tf
 [<68626173652e6873746f72652e626c6f636b696e6757> 49.8047 <61697454696d6520686173206265656e2065786365656465642e>] TJ
 ET
 
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
-q
-0.0 0.0 0.0 scn
-0.0 0.0 0.0 SCN
-1 w
-0 J
-0 j
-[] 0 d
-/Stamp1 Do
-0.2 0.2 0.2 scn
-0.2 0.2 0.2 SCN
-
-BT
-535.978 14.388 Td
-/F1.0 9 Tf
-<3435> Tj
-ET
-
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-Q
-Q
-
-endstream
-endobj
-312 0 obj
-<< /Type /Page
-/Parent 3 0 R
-/MediaBox [0 0 595.28 841.89]
-/CropBox [0 0 595.28 841.89]
-/BleedBox [0 0 595.28 841.89]
-/TrimBox [0 0 595.28 841.89]
-/ArtBox [0 0 595.28 841.89]
-/Contents 311 0 R
-/Resources << /ProcSet [/PDF /Text /ImageB /ImageC /ImageI]
-/Font << /F3.0 33 0 R
-/F4.0 35 0 R
-/F1.0 10 0 R
->>
-/XObject << /Stamp1 4613 0 R
->>
->>
->>
-endobj
-313 0 obj
-[312 0 R /XYZ 0 763.6115 null]
-endobj
-314 0 obj
-[312 0 R /XYZ 0 548.3895 null]
-endobj
-315 0 obj
-[312 0 R /XYZ 0 412.0675 null]
-endobj
-316 0 obj
-<< /Limits [(hbase.hstore.flusher.count) (hbase.lease.recovery.timeout)]
-/Names [(hbase.hstore.flusher.count) 317 0 R (hbase.hstore.time.to.purge.deletes) 330 0 R (hbase.http.filter.initializers) 482 0 R (hbase.http.max.threads) 484 0 R (hbase.http.staticuser.user) 487 0 R (hbase.ipc.client.fallback-to-simple-auth-allowed) 388 0 R (hbase.ipc.client.tcpnodelay) 368 0 R (hbase.ipc.server.callqueue.handler.factor) 226 0 R (hbase.ipc.server.callqueue.read.ratio) 229 0 R (hbase.ipc.server.callqueue.scan.ratio) 230 0 R (hbase.ipc.server.fallback-to-simple-auth-allowed) 389 0 R (hbase.lease.recovery.dfs.timeout) 432 0 R (hbase.lease.recovery.timeout) 431 0 R]
->>
-endobj
-317 0 obj
-[312 0 R /XYZ 0 275.7455 null]
-endobj
-318 0 obj
-[312 0 R /XYZ 0 139.4235 null]
-endobj
-319 0 obj
-<< /Length 10053
->>
-stream
-q
-/DeviceRGB cs
 0.2 0.2 0.2 scn
-/DeviceRGB CS
 0.2 0.2 0.2 SCN
 
 BT
-63.24 794.5242 Td
+63.24 701.1932 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45172,7 +45227,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 777.119 Td
+63.24 683.788 Td
 /F4.0 10.5 Tf
 <3136> Tj
 ET
@@ -45183,7 +45238,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 753.119 Td
+48.24 659.788 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e626c6f636b696e675761697454696d65> Tj
 ET
@@ -45194,7 +45249,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 736.5332 Td
+63.24 643.2022 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45207,7 +45262,7 @@ ET
 1.9972 Tw
 
 BT
-63.24 716.944 Td
+63.24 623.613 Td
 /F1.0 10.5 Tf
 [<5468652074696d6520666f72207768696368206120726567696f6e2077696c6c20626c6f636b2075706461746573206166746572207265616368696e67207468652053746f726546696c65206c696d697420646566696e65642062> 20.0195 <79>] TJ
 ET
@@ -45222,7 +45277,7 @@ ET
 4.0217 Tw
 
 BT
-63.24 701.164 Td
+63.24 607.833 Td
 /F1.0 10.5 Tf
 <68626173652e6873746f72652e626c6f636b696e6753746f726546696c65732e20416674657220746869732074696d652068617320656c61707365642c2074686520726567696f6e2077696c6c2073746f7020626c6f636b696e67> Tj
 ET
@@ -45235,7 +45290,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 685.384 Td
+63.24 592.053 Td
 /F1.0 10.5 Tf
 <75706461746573206576656e206966206120636f6d70616374696f6e20686173206e6f74206265656e20636f6d706c657465642e> Tj
 ET
@@ -45246,7 +45301,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 658.2022 Td
+63.24 564.8712 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45257,7 +45312,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 640.797 Td
+63.24 547.466 Td
 /F4.0 10.5 Tf
 <3930303030> Tj
 ET
@@ -45268,7 +45323,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 616.797 Td
+48.24 523.466 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e2e6d696e> Tj
 ET
@@ -45279,7 +45334,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 600.2112 Td
+63.24 506.8802 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45292,7 +45347,7 @@ ET
 1.1875 Tw
 
 BT
-63.24 580.622 Td
+63.24 487.291 Td
 /F1.0 10.5 Tf
 <546865206d696e696d756d206e756d626572206f662053746f726546696c6573207768696368206d75737420626520656c696769626c6520666f7220636f6d70616374696f6e206265666f726520636f6d70616374696f6e> Tj
 ET
@@ -45307,7 +45362,7 @@ ET
 1.2002 Tw
 
 BT
-63.24 564.842 Td
+63.24 471.511 Td
 /F1.0 10.5 Tf
 [<63616e2072756e2e2054686520676f616c206f662074756e696e672068626173652e6873746f72652e636f6d70616374696f6e2e6d696e20697320746f2061766f696420656e64696e67207570207769746820746f6f206d616e> 20.0195 <79>] TJ
 ET
@@ -45322,7 +45377,7 @@ ET
 0.0809 Tw
 
 BT
-63.24 549.062 Td
+63.24 455.731 Td
 /F1.0 10.5 Tf
 [<74696e> 20.0195 <792053746f726546696c657320746f20636f6d706163742e2053657474696e6720746869732076616c756520746f203220776f756c642063617573652061206d696e6f7220636f6d70616374696f6e20656163682074696d6520796f75>] TJ
 ET
@@ -45337,7 +45392,7 @@ ET
 0.0532 Tw
 
 BT
-63.24 533.282 Td
+63.24 439.951 Td
 /F1.0 10.5 Tf
 <686176652074776f2053746f726546696c657320696e20612053746f72652c20616e6420746869732069732070726f6261626c79206e6f7420617070726f7072696174652e20496620796f752073657420746869732076616c756520746f6f20686967682c> Tj
 ET
@@ -45352,7 +45407,7 @@ ET
 1.7286 Tw
 
 BT
-63.24 517.502 Td
+63.24 424.171 Td
 /F1.0 10.5 Tf
 [<616c6c20746865206f746865722076616c7565732077696c6c206e65656420746f2062652061646a7573746564206163636f7264696e676c79> 89.8438 <2e2046> 40.0391 <6f72206d6f73742063617365732c207468652064656661756c742076616c7565206973>] TJ
 ET
@@ -45367,7 +45422,7 @@ ET
 2.2554 Tw
 
 BT
-63.24 501.722 Td
+63.24 408.391 Td
 /F1.0 10.5 Tf
 [<617070726f7072696174652e20496e2070726576696f75732076657273696f6e73206f662048426173652c2074686520706172> 20.0195 <616d657465722068626173652e6873746f72652e636f6d70616374696f6e2e6d696e20776173>] TJ
 ET
@@ -45380,7 +45435,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 485.942 Td
+63.24 392.611 Td
 /F1.0 10.5 Tf
 <6e616d65642068626173652e6873746f72652e636f6d70616374696f6e5468726573686f6c642e> Tj
 ET
@@ -45391,7 +45446,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 458.7602 Td
+63.24 365.4292 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45402,7 +45457,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 441.355 Td
+63.24 348.024 Td
 /F4.0 10.5 Tf
 <33> Tj
 ET
@@ -45413,7 +45468,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 417.355 Td
+48.24 324.024 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e2e6d6178> Tj
 ET
@@ -45424,7 +45479,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 400.7692 Td
+63.24 307.4382 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45437,7 +45492,7 @@ ET
 2.6381 Tw
 
 BT
-63.24 381.18 Td
+63.24 287.849 Td
 /F1.0 10.5 Tf
 <546865206d6178696d756d206e756d626572206f662053746f726546696c65732077686963682077696c6c2062652073656c656374656420666f7220612073696e676c65206d696e6f7220636f6d70616374696f6e2c> Tj
 ET
@@ -45452,7 +45507,7 @@ ET
 13.2185 Tw
 
 BT
-63.24 365.4 Td
+63.24 272.069 Td
 /F1.0 10.5 Tf
 [<7265676172646c657373206f6620746865206e756d626572206f6620656c696769626c652053746f726546696c65732e204566666563746976656c79> 89.8438 <2c207468652076616c7565206f66>] TJ
 ET
@@ -45467,7 +45522,7 @@ ET
 4.2976 Tw
 
 BT
-63.24 349.62 Td
+63.24 256.289 Td
 /F1.0 10.5 Tf
 [<68626173652e6873746f72652e636f6d70616374696f6e2e6d617820636f6e74726f6c7320746865206c656e677468206f662074696d652069742074616b> 20.0195 <657320612073696e676c6520636f6d70616374696f6e20746f>] TJ
 ET
@@ -45482,7 +45537,7 @@ ET
 1.3164 Tw
 
 BT
-63.24 333.84 Td
+63.24 240.509 Td
 /F1.0 10.5 Tf
 [<636f6d706c6574652e2053657474696e67206974206c6172676572206d65616e732074686174206d6f72652053746f726546696c65732061726520696e636c7564656420696e206120636f6d70616374696f6e2e2046> 40.0391 <6f72206d6f7374>] TJ
 ET
@@ -45495,7 +45550,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 318.06 Td
+63.24 224.729 Td
 /F1.0 10.5 Tf
 <63617365732c207468652064656661756c742076616c756520697320617070726f7072696174652e> Tj
 ET
@@ -45506,7 +45561,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 290.8782 Td
+63.24 197.5472 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45517,7 +45572,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 273.473 Td
+63.24 180.142 Td
 /F4.0 10.5 Tf
 <3130> Tj
 ET
@@ -45528,7 +45583,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 249.473 Td
+48.24 156.142 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e2e6d696e2e73697a65> Tj
 ET
@@ -45539,7 +45594,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 232.8872 Td
+63.24 139.5562 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45552,7 +45607,7 @@ ET
 1.5494 Tw
 
 BT
-63.24 213.298 Td
+63.24 119.967 Td
 /F1.0 10.5 Tf
 <412053746f726546696c6520286f7220612073656c656374696f6e206f662053746f726546696c65732c207768656e207573696e67204578706c6f72696e67436f6d70616374696f6e506f6c6963792920736d616c6c6572207468616e> Tj
 ET
@@ -45567,7 +45622,7 @@ ET
 0.3526 Tw
 
 BT
-63.24 197.518 Td
+63.24 104.187 Td
 /F1.0 10.5 Tf
 [<746869732073697a652077696c6c20616c7761> 20.0195 <797320626520656c696769626c6520666f72206d696e6f7220636f6d70616374696f6e2e204846696c657320746869732073697a65206f72206c617267657220617265206576616c75617465642062> 20.0195 <79>] TJ
 ET
@@ -45582,7 +45637,7 @@ ET
 0.4183 Tw
 
 BT
-63.24 181.738 Td
+63.24 88.407 Td
 /F1.0 10.5 Tf
 [<68626173652e6873746f72652e636f6d70616374696f6e2e72> 20.0195 <6174696f20746f2064657465726d696e6520696620746865792061726520656c696769626c652e20426563617573652074686973206c696d697420726570726573656e747320746865>] TJ
 ET
@@ -45597,7 +45652,7 @@ ET
 1.5926 Tw
 
 BT
-63.24 165.958 Td
+63.24 72.627 Td
 /F1.0 10.5 Tf
 [<226175746f6d6174696320696e636c75646522206c696d697420666f7220616c6c2053746f726546696c657320736d616c6c6572207468616e20746869732076616c75652c20746869732076616c7565206d61> 20.0195 <79206e65656420746f206265>] TJ
 ET
@@ -45612,7 +45667,7 @@ ET
 2.0735 Tw
 
 BT
-63.24 150.178 Td
+63.24 56.847 Td
 /F1.0 10.5 Tf
 [<7265647563656420696e2077726974652d686561767920656e7669726f6e6d656e7473207768657265206d616e> 20.0195 <792053746f726546696c657320696e2074686520312d32204d422072> 20.0195 <616e676520617265206265696e67>] TJ
 ET
@@ -45621,13 +45676,78 @@ ET
 0.0 Tw
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
+q
+0.0 0.0 0.0 scn
+0.0 0.0 0.0 SCN
+1 w
+0 J
+0 j
+[] 0 d
+/Stamp2 Do
+0.2 0.2 0.2 scn
+0.2 0.2 0.2 SCN
+
+BT
+49.24 14.388 Td
+/F1.0 9 Tf
+<3436> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+Q
+Q
+
+endstream
+endobj
+320 0 obj
+<< /Type /Page
+/Parent 3 0 R
+/MediaBox [0 0 595.28 841.89]
+/CropBox [0 0 595.28 841.89]
+/BleedBox [0 0 595.28 841.89]
+/TrimBox [0 0 595.28 841.89]
+/ArtBox [0 0 595.28 841.89]
+/Contents 319 0 R
+/Resources << /ProcSet [/PDF /Text /ImageB /ImageC /ImageI]
+/Font << /F4.0 35 0 R
+/F3.0 33 0 R
+/F1.0 10 0 R
+>>
+/XObject << /Stamp2 4615 0 R
+>>
+>>
+>>
+endobj
+321 0 obj
+[320 0 R /XYZ 0 805.89 null]
+endobj
+322 0 obj
+[320 0 R /XYZ 0 669.568 null]
+endobj
+323 0 obj
+[320 0 R /XYZ 0 533.246 null]
+endobj
+324 0 obj
+[320 0 R /XYZ 0 333.804 null]
+endobj
+325 0 obj
+[320 0 R /XYZ 0 165.922 null]
+endobj
+326 0 obj
+<< /Length 10501
+>>
+stream
+q
+/DeviceRGB cs
 0.2 0.2 0.2 scn
+/DeviceRGB CS
 0.2 0.2 0.2 SCN
 
 1.9785 Tw
 
 BT
-63.24 134.398 Td
+63.24 794.676 Td
 /F1.0 10.5 Tf
 <666c75736865642c20626563617573652065766572792053746f726546696c652077696c6c20626520746172676574656420666f7220636f6d70616374696f6e20616e642074686520726573756c74696e672053746f726546696c6573> Tj
 ET
@@ -45642,7 +45762,7 @@ ET
 2.6642 Tw
 
 BT
-63.24 118.618 Td
+63.24 778.896 Td
 /F1.0 10.5 Tf
 [<6d61> 20.0195 <79207374696c6c20626520756e64657220746865206d696e696d756d2073697a6520616e642072657175697265206675727468657220636f6d70616374696f6e2e204966207468697320706172> 20.0195 <616d65746572206973>] TJ
 ET
@@ -45657,7 +45777,7 @@ ET
 1.4198 Tw
 
 BT
-63.24 102.838 Td
+63.24 763.116 Td
 /F1.0 10.5 Tf
 [<6c6f77657265642c207468652072> 20.0195 <6174696f20636865636b20697320747269676765726564206d6f726520717569636b6c79> 89.8438 <2e20546869732061646472657373656420736f6d6520697373756573207365656e20696e206561726c696572>] TJ
 ET
@@ -45672,7 +45792,7 @@ ET
 3.2431 Tw
 
 BT
-63.24 87.058 Td
+63.24 747.336 Td
 /F1.0 10.5 Tf
 [<76657273696f6e73206f6620484261736520627574206368616e67696e67207468697320706172> 20.0195 <616d65746572206973206e6f206c6f6e676572206e656365737361727920696e206d6f737420736974756174696f6e732e>] TJ
 ET
@@ -45685,80 +45805,18 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 71.278 Td
+63.24 731.556 Td
 /F1.0 10.5 Tf
 [<44656661756c743a20313238204d422065787072657373656420696e2062> 20.0195 <797465732e>] TJ
 ET
 
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
-q
-0.0 0.0 0.0 scn
-0.0 0.0 0.0 SCN
-1 w
-0 J
-0 j
-[] 0 d
-/Stamp2 Do
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
 BT
-49.24 14.388 Td
-/F1.0 9 Tf
-<3436> Tj
-ET
-
-0.0 0.0 0.0 SCN
-0.0 0.0 0.0 scn
-Q
-Q
-
-endstream
-endobj
-320 0 obj
-<< /Type /Page
-/Parent 3 0 R
-/MediaBox [0 0 595.28 841.89]
-/CropBox [0 0 595.28 841.89]
-/BleedBox [0 0 595.28 841.89]
-/TrimBox [0 0 595.28 841.89]
-/ArtBox [0 0 595.28 841.89]
-/Contents 319 0 R
-/Resources << /ProcSet [/PDF /Text /ImageB /ImageC /ImageI]
-/Font << /F3.0 33 0 R
-/F4.0 35 0 R
-/F1.0 10 0 R
->>
-/XObject << /Stamp2 4614 0 R
->>
->>
->>
-endobj
-321 0 obj
-[320 0 R /XYZ 0 762.899 null]
-endobj
-322 0 obj
-[320 0 R /XYZ 0 626.577 null]
-endobj
-323 0 obj
-[320 0 R /XYZ 0 427.135 null]
-endobj
-324 0 obj
-[320 0 R /XYZ 0 259.253 null]
-endobj
-325 0 obj
-<< /Length 10469
->>
-stream
-q
-/DeviceRGB cs
-0.2 0.2 0.2 scn
-/DeviceRGB CS
-0.2 0.2 0.2 SCN
-
-BT
-63.24 795.2367 Td
+63.24 704.3742 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45769,7 +45827,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 777.8315 Td
+63.24 686.969 Td
 /F4.0 10.5 Tf
 <313334323137373238> Tj
 ET
@@ -45780,7 +45838,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 753.8315 Td
+48.24 662.969 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e2e6d61782e73697a65> Tj
 ET
@@ -45791,7 +45849,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 737.2457 Td
+63.24 646.3832 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45804,7 +45862,7 @@ ET
 0.2662 Tw
 
 BT
-63.24 717.6565 Td
+63.24 626.794 Td
 /F1.0 10.5 Tf
 <412053746f726546696c6520286f7220612073656c656374696f6e206f662053746f726546696c65732c207768656e207573696e67204578706c6f72696e67436f6d70616374696f6e506f6c69637929206c6172676572207468616e2074686973> Tj
 ET
@@ -45819,7 +45877,7 @@ ET
 0.236 Tw
 
 BT
-63.24 701.8765 Td
+63.24 611.014 Td
 /F1.0 10.5 Tf
 [<73697a652077696c6c206265206578636c756465642066726f6d20636f6d70616374696f6e2e2054686520656666656374206f662072> 20.0195 <616973696e672068626173652e6873746f72652e636f6d70616374696f6e2e6d61782e73697a65206973>] TJ
 ET
@@ -45834,7 +45892,7 @@ ET
 4.0201 Tw
 
 BT
-63.24 686.0965 Td
+63.24 595.234 Td
 /F1.0 10.5 Tf
 <66657765722c206c61726765722053746f726546696c6573207468617420646f206e6f742067657420636f6d706163746564206f6674656e2e20496620796f75206665656c207468617420636f6d70616374696f6e206973> Tj
 ET
@@ -45849,7 +45907,7 @@ ET
 0.9927 Tw
 
 BT
-63.24 670.3165 Td
+63.24 579.454 Td
 /F1.0 10.5 Tf
 [<68617070656e696e6720746f6f206f6674656e20776974686f7574206d7563682062656e656669742c20796f752063616e207472792072> 20.0195 <616973696e6720746869732076616c75652e2044656661756c743a207468652076616c7565206f66>] TJ
 ET
@@ -45862,7 +45920,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 654.5365 Td
+63.24 563.674 Td
 /F1.0 10.5 Tf
 [<4c4f4e472e4d41585f56> 80.0781 <414c55452c2065787072657373656420696e2062> 20.0195 <797465732e>] TJ
 ET
@@ -45873,7 +45931,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 627.3547 Td
+63.24 536.4922 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -45884,7 +45942,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 609.9495 Td
+63.24 519.087 Td
 /F4.0 10.5 Tf
 <39323233333732303336383534373735383037> Tj
 ET
@@ -45895,7 +45953,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 585.9495 Td
+48.24 495.087 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e2e726174696f> Tj
 ET
@@ -45906,7 +45964,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 569.3637 Td
+63.24 478.5012 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -45919,7 +45977,7 @@ ET
 0.6225 Tw
 
 BT
-63.24 549.7745 Td
+63.24 458.912 Td
 /F1.0 10.5 Tf
 [<46> 40.0391 <6f72206d696e6f7220636f6d70616374696f6e2c20746869732072> 20.0195 <6174696f206973207573656420746f2064657465726d696e652077686574686572206120676976656e2053746f726546696c65207768696368206973206c6172676572>] TJ
 ET
@@ -45934,7 +45992,7 @@ ET
 0.0909 Tw
 
 BT
-63.24 533.9945 Td
+63.24 443.132 Td
 /F1.0 10.5 Tf
 <7468616e2068626173652e6873746f72652e636f6d70616374696f6e2e6d696e2e73697a6520697320656c696769626c6520666f7220636f6d70616374696f6e2e204974732065666665637420697320746f206c696d697420636f6d70616374696f6e> Tj
 ET
@@ -45949,7 +46007,7 @@ ET
 1.5007 Tw
 
 BT
-63.24 518.2145 Td
+63.24 427.352 Td
 /F1.0 10.5 Tf
 [<6f66206c617267652053746f726546696c65732e205468652076616c7565206f662068626173652e6873746f72652e636f6d70616374696f6e2e72> 20.0195 <6174696f20697320657870726573736564206173206120666c6f6174696e672d706f696e74>] TJ
 ET
@@ -45964,7 +46022,7 @@ ET
 0.7029 Tw
 
 BT
-63.24 502.4345 Td
+63.24 411.572 Td
 /F1.0 10.5 Tf
 [<646563696d616c2e2041206c617267652072> 20.0195 <6174696f2c20737563682061732031302c2077696c6c2070726f6475636520612073696e676c65206769616e742053746f726546696c652e20436f6e76657273656c79> 89.8438 <2c2061206c6f772076616c75652c>] TJ
 ET
@@ -45979,7 +46037,7 @@ ET
 0.34 Tw
 
 BT
-63.24 486.6545 Td
+63.24 395.792 Td
 /F1.0 10.5 Tf
 [<73756368206173202e32352c2077696c6c2070726f64756365206265686176696f722073696d696c617220746f207468652042696754> 29.7852 <61626c6520636f6d70616374696f6e20616c676f726974686d2c2070726f647563696e6720666f7572>] TJ
 ET
@@ -45994,7 +46052,7 @@ ET
 1.1274 Tw
 
 BT
-63.24 470.8745 Td
+63.24 380.012 Td
 /F1.0 10.5 Tf
 [<53746f726546696c65732e2041206d6f646572> 20.0195 <6174652076616c7565206f66206265747765656e20312e3020616e6420312e34206973207265636f6d6d656e6465642e205768656e2074756e696e6720746869732076616c75652c>] TJ
 ET
@@ -46009,7 +46067,7 @@ ET
 0.4398 Tw
 
 BT
-63.24 455.0945 Td
+63.24 364.232 Td
 /F1.0 10.5 Tf
 [<796f75206172652062616c616e63696e6720777269746520636f7374732077697468207265616420636f7374732e2052616973696e67207468652076616c75652028746f20736f6d657468696e67206c696b> 20.0195 <6520312e34292077696c6c2068617665>] TJ
 ET
@@ -46024,7 +46082,7 @@ ET
 0.2651 Tw
 
 BT
-63.24 439.3145 Td
+63.24 348.452 Td
 /F1.0 10.5 Tf
 <6d6f726520777269746520636f7374732c206265636175736520796f752077696c6c20636f6d70616374206c61726765722053746f726546696c65732e20486f77657665722c20647572696e672072656164732c2048426173652077696c6c> Tj
 ET
@@ -46039,7 +46097,7 @@ ET
 1.9149 Tw
 
 BT
-63.24 423.5345 Td
+63.24 332.672 Td
 /F1.0 10.5 Tf
 <6e65656420746f207365656b207468726f7567682066657765722053746f726546696c657320746f206163636f6d706c6973682074686520726561642e20436f6e7369646572207468697320617070726f61636820696620796f75> Tj
 ET
@@ -46054,7 +46112,7 @@ ET
 0.0835 Tw
 
 BT
-63.24 407.7545 Td
+63.24 316.892 Td
 /F1.0 10.5 Tf
 [<63616e6e6f742074616b> 20.0195 <6520616476616e74616765206f6620426c6f6f6d2066696c746572732e204f74686572776973652c20796f752063616e206c6f77657220746869732076616c756520746f20736f6d657468696e67206c696b> 20.0195 <6520312e30>] TJ
 ET
@@ -46069,7 +46127,7 @@ ET
 3.3076 Tw
 
 BT
-63.24 391.9745 Td
+63.24 301.112 Td
 /F1.0 10.5 Tf
 [<746f2072656475636520746865206261636b> 20.0195 <67726f756e6420636f7374206f66207772697465732c20616e642075736520426c6f6f6d2066696c7465727320746f20636f6e74726f6c20746865206e756d626572206f66>] TJ
 ET
@@ -46082,7 +46140,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 376.1945 Td
+63.24 285.332 Td
 /F1.0 10.5 Tf
 [<53746f726546696c657320746f756368656420647572696e672072656164732e2046> 40.0391 <6f72206d6f73742063617365732c207468652064656661756c742076616c756520697320617070726f7072696174652e>] TJ
 ET
@@ -46093,7 +46151,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 349.0127 Td
+63.24 258.1502 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -46104,7 +46162,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 331.6075 Td
+63.24 240.745 Td
 /F4.0 10.5 Tf
 <312e3246> Tj
 ET
@@ -46115,7 +46173,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 307.6075 Td
+48.24 216.745 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e636f6d70616374696f6e2e726174696f2e6f66667065616b> Tj
 ET
@@ -46126,7 +46184,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 291.0217 Td
+63.24 200.1592 Td
 /F3.0 9.975 Tf
 <4465736372697074696f6e> Tj
 ET
@@ -46139,7 +46197,7 @@ ET
 0.7059 Tw
 
 BT
-63.24 271.4325 Td
+63.24 180.57 Td
 /F1.0 10.5 Tf
 [<416c6c6f777320796f7520746f20736574206120646966666572656e74202862> 20.0195 <792064656661756c742c206d6f72652061676772657373697665292072> 20.0195 <6174696f20666f722064657465726d696e696e672077686574686572206c6172676572>] TJ
 ET
@@ -46154,7 +46212,7 @@ ET
 3.3344 Tw
 
 BT
-63.24 255.6525 Td
+63.24 164.79 Td
 /F1.0 10.5 Tf
 [<53746f726546696c65732061726520696e636c7564656420696e20636f6d70616374696f6e7320647572696e67206f66662d7065616b20686f7572732e2057> 60.0586 <6f726b7320696e207468652073616d65207761> 20.0195 <79206173>] TJ
 ET
@@ -46169,7 +46227,7 @@ ET
 21.6346 Tw
 
 BT
-63.24 239.8725 Td
+63.24 149.01 Td
 /F1.0 10.5 Tf
 [<68626173652e6873746f72652e636f6d70616374696f6e2e72> 20.0195 <6174696f2e204f6e6c79206170706c6965732069662068626173652e6f66667065616b2e73746172742e686f757220616e64>] TJ
 ET
@@ -46182,7 +46240,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 224.0925 Td
+63.24 133.23 Td
 /F1.0 10.5 Tf
 <68626173652e6f66667065616b2e656e642e686f75722061726520616c736f20656e61626c65642e> Tj
 ET
@@ -46193,7 +46251,7 @@ ET
 0.2 0.2 0.2 SCN
 
 BT
-63.24 196.9107 Td
+63.24 106.0482 Td
 /F3.0 9.975 Tf
 <44656661756c74> Tj
 ET
@@ -46204,7 +46262,7 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-63.24 179.5055 Td
+63.24 88.643 Td
 /F4.0 10.5 Tf
 <352e3046> Tj
 ET
@@ -46215,18 +46273,80 @@ ET
 0.6941 0.1294 0.2745 SCN
 
 BT
-48.24 155.5055 Td
+48.24 64.643 Td
 /F4.0 10.5 Tf
 <68626173652e6873746f72652e74696d652e746f2e70757267652e64656c65746573> Tj
 ET
 
 0.0 0.0 0.0 SCN
 0.0 0.0 0.0 scn
+q
+0.0 0.0 0.0 scn
+0.0 0.0 0.0 SCN
+1 w
+0 J
+0 j
+[] 0 d
+/Stamp1 Do
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
 BT
-63.24 138.9197 Td
+535.978 14.388 Td
+/F1.0 9 Tf
+<3437> Tj
+ET
+
+0.0 0.0 0.0 SCN
+0.0 0.0 0.0 scn
+Q
+Q
+
+endstream
+endobj
+327 0 obj
+<< /Type /Page
+/Parent 3 0 R
+/Medi

<TRUNCATED>

[03/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("F

<TRUNCATED>

[20/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span cla

<TRUNCATED>

[13/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed to create

<TRUNCATED>

[27/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
index c27dc14..68e703a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
@@ -68,290 +68,288 @@
 <span class="sourceLineNo">060</span><a name="line.60"></a>
 <span class="sourceLineNo">061</span>  public final static long DEEP_OVERHEAD = FIXED_OVERHEAD;<a name="line.61"></a>
 <span class="sourceLineNo">062</span><a name="line.62"></a>
-<span class="sourceLineNo">063</span>  public static long addToScanners(List&lt;? extends Segment&gt; segments, long readPt, long order,<a name="line.63"></a>
+<span class="sourceLineNo">063</span>  public static void addToScanners(List&lt;? extends Segment&gt; segments, long readPt,<a name="line.63"></a>
 <span class="sourceLineNo">064</span>      List&lt;KeyValueScanner&gt; scanners) {<a name="line.64"></a>
 <span class="sourceLineNo">065</span>    for (Segment item : segments) {<a name="line.65"></a>
-<span class="sourceLineNo">066</span>      order = addToScanners(item, readPt, order, scanners);<a name="line.66"></a>
+<span class="sourceLineNo">066</span>      addToScanners(item, readPt, scanners);<a name="line.66"></a>
 <span class="sourceLineNo">067</span>    }<a name="line.67"></a>
-<span class="sourceLineNo">068</span>    return order;<a name="line.68"></a>
-<span class="sourceLineNo">069</span>  }<a name="line.69"></a>
-<span class="sourceLineNo">070</span><a name="line.70"></a>
-<span class="sourceLineNo">071</span>  protected static long addToScanners(Segment segment, long readPt, long order,<a name="line.71"></a>
-<span class="sourceLineNo">072</span>      List&lt;KeyValueScanner&gt; scanners) {<a name="line.72"></a>
-<span class="sourceLineNo">073</span>    scanners.add(segment.getScanner(readPt, order));<a name="line.73"></a>
-<span class="sourceLineNo">074</span>    return order - 1;<a name="line.74"></a>
-<span class="sourceLineNo">075</span>  }<a name="line.75"></a>
-<span class="sourceLineNo">076</span><a name="line.76"></a>
-<span class="sourceLineNo">077</span>  protected AbstractMemStore(final Configuration conf, final CellComparator c) {<a name="line.77"></a>
-<span class="sourceLineNo">078</span>    this.conf = conf;<a name="line.78"></a>
-<span class="sourceLineNo">079</span>    this.comparator = c;<a name="line.79"></a>
-<span class="sourceLineNo">080</span>    resetActive();<a name="line.80"></a>
-<span class="sourceLineNo">081</span>    this.snapshot = SegmentFactory.instance().createImmutableSegment(c);<a name="line.81"></a>
-<span class="sourceLineNo">082</span>    this.snapshotId = NO_SNAPSHOT_ID;<a name="line.82"></a>
-<span class="sourceLineNo">083</span>  }<a name="line.83"></a>
-<span class="sourceLineNo">084</span><a name="line.84"></a>
-<span class="sourceLineNo">085</span>  protected void resetActive() {<a name="line.85"></a>
-<span class="sourceLineNo">086</span>    // Reset heap to not include any keys<a name="line.86"></a>
-<span class="sourceLineNo">087</span>    this.active = SegmentFactory.instance().createMutableSegment(conf, comparator);<a name="line.87"></a>
-<span class="sourceLineNo">088</span>    this.timeOfOldestEdit = Long.MAX_VALUE;<a name="line.88"></a>
-<span class="sourceLineNo">089</span>  }<a name="line.89"></a>
-<span class="sourceLineNo">090</span><a name="line.90"></a>
-<span class="sourceLineNo">091</span>  /**<a name="line.91"></a>
-<span class="sourceLineNo">092</span>   * Updates the wal with the lowest sequence id (oldest entry) that is still in memory<a name="line.92"></a>
-<span class="sourceLineNo">093</span>   * @param onlyIfMoreRecent a flag that marks whether to update the sequence id no matter what or<a name="line.93"></a>
-<span class="sourceLineNo">094</span>   *                      only if it is greater than the previous sequence id<a name="line.94"></a>
-<span class="sourceLineNo">095</span>   */<a name="line.95"></a>
-<span class="sourceLineNo">096</span>  public abstract void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent);<a name="line.96"></a>
-<span class="sourceLineNo">097</span><a name="line.97"></a>
-<span class="sourceLineNo">098</span>  @Override<a name="line.98"></a>
-<span class="sourceLineNo">099</span>  public void add(Iterable&lt;Cell&gt; cells, MemStoreSizing memstoreSizing) {<a name="line.99"></a>
-<span class="sourceLineNo">100</span>    for (Cell cell : cells) {<a name="line.100"></a>
-<span class="sourceLineNo">101</span>      add(cell, memstoreSizing);<a name="line.101"></a>
-<span class="sourceLineNo">102</span>    }<a name="line.102"></a>
-<span class="sourceLineNo">103</span>  }<a name="line.103"></a>
-<span class="sourceLineNo">104</span><a name="line.104"></a>
-<span class="sourceLineNo">105</span>  @Override<a name="line.105"></a>
-<span class="sourceLineNo">106</span>  public void add(Cell cell, MemStoreSizing memstoreSizing) {<a name="line.106"></a>
-<span class="sourceLineNo">107</span>    Cell toAdd = maybeCloneWithAllocator(cell, false);<a name="line.107"></a>
-<span class="sourceLineNo">108</span>    boolean mslabUsed = (toAdd != cell);<a name="line.108"></a>
-<span class="sourceLineNo">109</span>    // This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). By<a name="line.109"></a>
-<span class="sourceLineNo">110</span>    // default MSLAB is ON and we might have copied cell to MSLAB area. If not we must do below deep<a name="line.110"></a>
-<span class="sourceLineNo">111</span>    // copy. Or else we will keep referring to the bigger chunk of memory and prevent it from<a name="line.111"></a>
-<span class="sourceLineNo">112</span>    // getting GCed.<a name="line.112"></a>
-<span class="sourceLineNo">113</span>    // Copy to MSLAB would not have happened if<a name="line.113"></a>
-<span class="sourceLineNo">114</span>    // 1. MSLAB is turned OFF. See "hbase.hregion.memstore.mslab.enabled"<a name="line.114"></a>
-<span class="sourceLineNo">115</span>    // 2. When the size of the cell is bigger than the max size supported by MSLAB. See<a name="line.115"></a>
-<span class="sourceLineNo">116</span>    // "hbase.hregion.memstore.mslab.max.allocation". This defaults to 256 KB<a name="line.116"></a>
-<span class="sourceLineNo">117</span>    // 3. When cells are from Append/Increment operation.<a name="line.117"></a>
-<span class="sourceLineNo">118</span>    if (!mslabUsed) {<a name="line.118"></a>
-<span class="sourceLineNo">119</span>      toAdd = deepCopyIfNeeded(toAdd);<a name="line.119"></a>
-<span class="sourceLineNo">120</span>    }<a name="line.120"></a>
-<span class="sourceLineNo">121</span>    internalAdd(toAdd, mslabUsed, memstoreSizing);<a name="line.121"></a>
-<span class="sourceLineNo">122</span>  }<a name="line.122"></a>
-<span class="sourceLineNo">123</span><a name="line.123"></a>
-<span class="sourceLineNo">124</span>  private static Cell deepCopyIfNeeded(Cell cell) {<a name="line.124"></a>
-<span class="sourceLineNo">125</span>    if (cell instanceof ExtendedCell) {<a name="line.125"></a>
-<span class="sourceLineNo">126</span>      return ((ExtendedCell) cell).deepClone();<a name="line.126"></a>
-<span class="sourceLineNo">127</span>    }<a name="line.127"></a>
-<span class="sourceLineNo">128</span>    return cell;<a name="line.128"></a>
-<span class="sourceLineNo">129</span>  }<a name="line.129"></a>
-<span class="sourceLineNo">130</span><a name="line.130"></a>
-<span class="sourceLineNo">131</span>  @Override<a name="line.131"></a>
-<span class="sourceLineNo">132</span>  public void upsert(Iterable&lt;Cell&gt; cells, long readpoint, MemStoreSizing memstoreSizing) {<a name="line.132"></a>
-<span class="sourceLineNo">133</span>    for (Cell cell : cells) {<a name="line.133"></a>
-<span class="sourceLineNo">134</span>      upsert(cell, readpoint, memstoreSizing);<a name="line.134"></a>
-<span class="sourceLineNo">135</span>    }<a name="line.135"></a>
-<span class="sourceLineNo">136</span>  }<a name="line.136"></a>
-<span class="sourceLineNo">137</span><a name="line.137"></a>
-<span class="sourceLineNo">138</span>  /**<a name="line.138"></a>
-<span class="sourceLineNo">139</span>   * @return Oldest timestamp of all the Cells in the MemStore<a name="line.139"></a>
-<span class="sourceLineNo">140</span>   */<a name="line.140"></a>
-<span class="sourceLineNo">141</span>  @Override<a name="line.141"></a>
-<span class="sourceLineNo">142</span>  public long timeOfOldestEdit() {<a name="line.142"></a>
-<span class="sourceLineNo">143</span>    return timeOfOldestEdit;<a name="line.143"></a>
-<span class="sourceLineNo">144</span>  }<a name="line.144"></a>
-<span class="sourceLineNo">145</span><a name="line.145"></a>
-<span class="sourceLineNo">146</span>  /**<a name="line.146"></a>
-<span class="sourceLineNo">147</span>   * The passed snapshot was successfully persisted; it can be let go.<a name="line.147"></a>
-<span class="sourceLineNo">148</span>   * @param id Id of the snapshot to clean out.<a name="line.148"></a>
-<span class="sourceLineNo">149</span>   * @see MemStore#snapshot()<a name="line.149"></a>
-<span class="sourceLineNo">150</span>   */<a name="line.150"></a>
-<span class="sourceLineNo">151</span>  @Override<a name="line.151"></a>
-<span class="sourceLineNo">152</span>  public void clearSnapshot(long id) throws UnexpectedStateException {<a name="line.152"></a>
-<span class="sourceLineNo">153</span>    if (this.snapshotId == -1) return;  // already cleared<a name="line.153"></a>
-<span class="sourceLineNo">154</span>    if (this.snapshotId != id) {<a name="line.154"></a>
-<span class="sourceLineNo">155</span>      throw new UnexpectedStateException("Current snapshot id is " + this.snapshotId + ",passed "<a name="line.155"></a>
-<span class="sourceLineNo">156</span>          + id);<a name="line.156"></a>
-<span class="sourceLineNo">157</span>    }<a name="line.157"></a>
-<span class="sourceLineNo">158</span>    // OK. Passed in snapshot is same as current snapshot. If not-empty,<a name="line.158"></a>
-<span class="sourceLineNo">159</span>    // create a new snapshot and let the old one go.<a name="line.159"></a>
-<span class="sourceLineNo">160</span>    Segment oldSnapshot = this.snapshot;<a name="line.160"></a>
-<span class="sourceLineNo">161</span>    if (!this.snapshot.isEmpty()) {<a name="line.161"></a>
-<span class="sourceLineNo">162</span>      this.snapshot = SegmentFactory.instance().createImmutableSegment(this.comparator);<a name="line.162"></a>
-<span class="sourceLineNo">163</span>    }<a name="line.163"></a>
-<span class="sourceLineNo">164</span>    this.snapshotId = NO_SNAPSHOT_ID;<a name="line.164"></a>
-<span class="sourceLineNo">165</span>    oldSnapshot.close();<a name="line.165"></a>
-<span class="sourceLineNo">166</span>  }<a name="line.166"></a>
-<span class="sourceLineNo">167</span><a name="line.167"></a>
-<span class="sourceLineNo">168</span>  @Override<a name="line.168"></a>
-<span class="sourceLineNo">169</span>  public MemStoreSize getSnapshotSize() {<a name="line.169"></a>
-<span class="sourceLineNo">170</span>    return getSnapshotSizing();<a name="line.170"></a>
-<span class="sourceLineNo">171</span>  }<a name="line.171"></a>
-<span class="sourceLineNo">172</span><a name="line.172"></a>
-<span class="sourceLineNo">173</span>  MemStoreSizing getSnapshotSizing() {<a name="line.173"></a>
-<span class="sourceLineNo">174</span>    return new MemStoreSizing(this.snapshot.keySize(),<a name="line.174"></a>
-<span class="sourceLineNo">175</span>        this.snapshot.heapSize(),<a name="line.175"></a>
-<span class="sourceLineNo">176</span>        this.snapshot.offHeapSize());<a name="line.176"></a>
-<span class="sourceLineNo">177</span>  }<a name="line.177"></a>
-<span class="sourceLineNo">178</span><a name="line.178"></a>
-<span class="sourceLineNo">179</span>  @Override<a name="line.179"></a>
-<span class="sourceLineNo">180</span>  public String toString() {<a name="line.180"></a>
-<span class="sourceLineNo">181</span>    StringBuilder buf = new StringBuilder();<a name="line.181"></a>
-<span class="sourceLineNo">182</span>    int i = 1;<a name="line.182"></a>
-<span class="sourceLineNo">183</span>    try {<a name="line.183"></a>
-<span class="sourceLineNo">184</span>      for (Segment segment : getSegments()) {<a name="line.184"></a>
-<span class="sourceLineNo">185</span>        buf.append("Segment (" + i + ") " + segment.toString() + "; ");<a name="line.185"></a>
-<span class="sourceLineNo">186</span>        i++;<a name="line.186"></a>
-<span class="sourceLineNo">187</span>      }<a name="line.187"></a>
-<span class="sourceLineNo">188</span>    } catch (IOException e){<a name="line.188"></a>
-<span class="sourceLineNo">189</span>      return e.toString();<a name="line.189"></a>
-<span class="sourceLineNo">190</span>    }<a name="line.190"></a>
-<span class="sourceLineNo">191</span>    return buf.toString();<a name="line.191"></a>
-<span class="sourceLineNo">192</span>  }<a name="line.192"></a>
-<span class="sourceLineNo">193</span><a name="line.193"></a>
-<span class="sourceLineNo">194</span>  protected Configuration getConfiguration() {<a name="line.194"></a>
-<span class="sourceLineNo">195</span>    return conf;<a name="line.195"></a>
-<span class="sourceLineNo">196</span>  }<a name="line.196"></a>
-<span class="sourceLineNo">197</span><a name="line.197"></a>
-<span class="sourceLineNo">198</span>  protected void dump(Logger log) {<a name="line.198"></a>
-<span class="sourceLineNo">199</span>    active.dump(log);<a name="line.199"></a>
-<span class="sourceLineNo">200</span>    snapshot.dump(log);<a name="line.200"></a>
-<span class="sourceLineNo">201</span>  }<a name="line.201"></a>
-<span class="sourceLineNo">202</span><a name="line.202"></a>
-<span class="sourceLineNo">203</span><a name="line.203"></a>
-<span class="sourceLineNo">204</span>  /*<a name="line.204"></a>
-<span class="sourceLineNo">205</span>   * Inserts the specified Cell into MemStore and deletes any existing<a name="line.205"></a>
-<span class="sourceLineNo">206</span>   * versions of the same row/family/qualifier as the specified Cell.<a name="line.206"></a>
+<span class="sourceLineNo">068</span>  }<a name="line.68"></a>
+<span class="sourceLineNo">069</span><a name="line.69"></a>
+<span class="sourceLineNo">070</span>  protected static void addToScanners(Segment segment, long readPt,<a name="line.70"></a>
+<span class="sourceLineNo">071</span>      List&lt;KeyValueScanner&gt; scanners) {<a name="line.71"></a>
+<span class="sourceLineNo">072</span>    scanners.add(segment.getScanner(readPt));<a name="line.72"></a>
+<span class="sourceLineNo">073</span>  }<a name="line.73"></a>
+<span class="sourceLineNo">074</span><a name="line.74"></a>
+<span class="sourceLineNo">075</span>  protected AbstractMemStore(final Configuration conf, final CellComparator c) {<a name="line.75"></a>
+<span class="sourceLineNo">076</span>    this.conf = conf;<a name="line.76"></a>
+<span class="sourceLineNo">077</span>    this.comparator = c;<a name="line.77"></a>
+<span class="sourceLineNo">078</span>    resetActive();<a name="line.78"></a>
+<span class="sourceLineNo">079</span>    this.snapshot = SegmentFactory.instance().createImmutableSegment(c);<a name="line.79"></a>
+<span class="sourceLineNo">080</span>    this.snapshotId = NO_SNAPSHOT_ID;<a name="line.80"></a>
+<span class="sourceLineNo">081</span>  }<a name="line.81"></a>
+<span class="sourceLineNo">082</span><a name="line.82"></a>
+<span class="sourceLineNo">083</span>  protected void resetActive() {<a name="line.83"></a>
+<span class="sourceLineNo">084</span>    // Reset heap to not include any keys<a name="line.84"></a>
+<span class="sourceLineNo">085</span>    this.active = SegmentFactory.instance().createMutableSegment(conf, comparator);<a name="line.85"></a>
+<span class="sourceLineNo">086</span>    this.timeOfOldestEdit = Long.MAX_VALUE;<a name="line.86"></a>
+<span class="sourceLineNo">087</span>  }<a name="line.87"></a>
+<span class="sourceLineNo">088</span><a name="line.88"></a>
+<span class="sourceLineNo">089</span>  /**<a name="line.89"></a>
+<span class="sourceLineNo">090</span>   * Updates the wal with the lowest sequence id (oldest entry) that is still in memory<a name="line.90"></a>
+<span class="sourceLineNo">091</span>   * @param onlyIfMoreRecent a flag that marks whether to update the sequence id no matter what or<a name="line.91"></a>
+<span class="sourceLineNo">092</span>   *                      only if it is greater than the previous sequence id<a name="line.92"></a>
+<span class="sourceLineNo">093</span>   */<a name="line.93"></a>
+<span class="sourceLineNo">094</span>  public abstract void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent);<a name="line.94"></a>
+<span class="sourceLineNo">095</span><a name="line.95"></a>
+<span class="sourceLineNo">096</span>  @Override<a name="line.96"></a>
+<span class="sourceLineNo">097</span>  public void add(Iterable&lt;Cell&gt; cells, MemStoreSizing memstoreSizing) {<a name="line.97"></a>
+<span class="sourceLineNo">098</span>    for (Cell cell : cells) {<a name="line.98"></a>
+<span class="sourceLineNo">099</span>      add(cell, memstoreSizing);<a name="line.99"></a>
+<span class="sourceLineNo">100</span>    }<a name="line.100"></a>
+<span class="sourceLineNo">101</span>  }<a name="line.101"></a>
+<span class="sourceLineNo">102</span><a name="line.102"></a>
+<span class="sourceLineNo">103</span>  @Override<a name="line.103"></a>
+<span class="sourceLineNo">104</span>  public void add(Cell cell, MemStoreSizing memstoreSizing) {<a name="line.104"></a>
+<span class="sourceLineNo">105</span>    Cell toAdd = maybeCloneWithAllocator(cell, false);<a name="line.105"></a>
+<span class="sourceLineNo">106</span>    boolean mslabUsed = (toAdd != cell);<a name="line.106"></a>
+<span class="sourceLineNo">107</span>    // This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). By<a name="line.107"></a>
+<span class="sourceLineNo">108</span>    // default MSLAB is ON and we might have copied cell to MSLAB area. If not we must do below deep<a name="line.108"></a>
+<span class="sourceLineNo">109</span>    // copy. Or else we will keep referring to the bigger chunk of memory and prevent it from<a name="line.109"></a>
+<span class="sourceLineNo">110</span>    // getting GCed.<a name="line.110"></a>
+<span class="sourceLineNo">111</span>    // Copy to MSLAB would not have happened if<a name="line.111"></a>
+<span class="sourceLineNo">112</span>    // 1. MSLAB is turned OFF. See "hbase.hregion.memstore.mslab.enabled"<a name="line.112"></a>
+<span class="sourceLineNo">113</span>    // 2. When the size of the cell is bigger than the max size supported by MSLAB. See<a name="line.113"></a>
+<span class="sourceLineNo">114</span>    // "hbase.hregion.memstore.mslab.max.allocation". This defaults to 256 KB<a name="line.114"></a>
+<span class="sourceLineNo">115</span>    // 3. When cells are from Append/Increment operation.<a name="line.115"></a>
+<span class="sourceLineNo">116</span>    if (!mslabUsed) {<a name="line.116"></a>
+<span class="sourceLineNo">117</span>      toAdd = deepCopyIfNeeded(toAdd);<a name="line.117"></a>
+<span class="sourceLineNo">118</span>    }<a name="line.118"></a>
+<span class="sourceLineNo">119</span>    internalAdd(toAdd, mslabUsed, memstoreSizing);<a name="line.119"></a>
+<span class="sourceLineNo">120</span>  }<a name="line.120"></a>
+<span class="sourceLineNo">121</span><a name="line.121"></a>
+<span class="sourceLineNo">122</span>  private static Cell deepCopyIfNeeded(Cell cell) {<a name="line.122"></a>
+<span class="sourceLineNo">123</span>    if (cell instanceof ExtendedCell) {<a name="line.123"></a>
+<span class="sourceLineNo">124</span>      return ((ExtendedCell) cell).deepClone();<a name="line.124"></a>
+<span class="sourceLineNo">125</span>    }<a name="line.125"></a>
+<span class="sourceLineNo">126</span>    return cell;<a name="line.126"></a>
+<span class="sourceLineNo">127</span>  }<a name="line.127"></a>
+<span class="sourceLineNo">128</span><a name="line.128"></a>
+<span class="sourceLineNo">129</span>  @Override<a name="line.129"></a>
+<span class="sourceLineNo">130</span>  public void upsert(Iterable&lt;Cell&gt; cells, long readpoint, MemStoreSizing memstoreSizing) {<a name="line.130"></a>
+<span class="sourceLineNo">131</span>    for (Cell cell : cells) {<a name="line.131"></a>
+<span class="sourceLineNo">132</span>      upsert(cell, readpoint, memstoreSizing);<a name="line.132"></a>
+<span class="sourceLineNo">133</span>    }<a name="line.133"></a>
+<span class="sourceLineNo">134</span>  }<a name="line.134"></a>
+<span class="sourceLineNo">135</span><a name="line.135"></a>
+<span class="sourceLineNo">136</span>  /**<a name="line.136"></a>
+<span class="sourceLineNo">137</span>   * @return Oldest timestamp of all the Cells in the MemStore<a name="line.137"></a>
+<span class="sourceLineNo">138</span>   */<a name="line.138"></a>
+<span class="sourceLineNo">139</span>  @Override<a name="line.139"></a>
+<span class="sourceLineNo">140</span>  public long timeOfOldestEdit() {<a name="line.140"></a>
+<span class="sourceLineNo">141</span>    return timeOfOldestEdit;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>  }<a name="line.142"></a>
+<span class="sourceLineNo">143</span><a name="line.143"></a>
+<span class="sourceLineNo">144</span>  /**<a name="line.144"></a>
+<span class="sourceLineNo">145</span>   * The passed snapshot was successfully persisted; it can be let go.<a name="line.145"></a>
+<span class="sourceLineNo">146</span>   * @param id Id of the snapshot to clean out.<a name="line.146"></a>
+<span class="sourceLineNo">147</span>   * @see MemStore#snapshot()<a name="line.147"></a>
+<span class="sourceLineNo">148</span>   */<a name="line.148"></a>
+<span class="sourceLineNo">149</span>  @Override<a name="line.149"></a>
+<span class="sourceLineNo">150</span>  public void clearSnapshot(long id) throws UnexpectedStateException {<a name="line.150"></a>
+<span class="sourceLineNo">151</span>    if (this.snapshotId == -1) return;  // already cleared<a name="line.151"></a>
+<span class="sourceLineNo">152</span>    if (this.snapshotId != id) {<a name="line.152"></a>
+<span class="sourceLineNo">153</span>      throw new UnexpectedStateException("Current snapshot id is " + this.snapshotId + ",passed "<a name="line.153"></a>
+<span class="sourceLineNo">154</span>          + id);<a name="line.154"></a>
+<span class="sourceLineNo">155</span>    }<a name="line.155"></a>
+<span class="sourceLineNo">156</span>    // OK. Passed in snapshot is same as current snapshot. If not-empty,<a name="line.156"></a>
+<span class="sourceLineNo">157</span>    // create a new snapshot and let the old one go.<a name="line.157"></a>
+<span class="sourceLineNo">158</span>    Segment oldSnapshot = this.snapshot;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>    if (!this.snapshot.isEmpty()) {<a name="line.159"></a>
+<span class="sourceLineNo">160</span>      this.snapshot = SegmentFactory.instance().createImmutableSegment(this.comparator);<a name="line.160"></a>
+<span class="sourceLineNo">161</span>    }<a name="line.161"></a>
+<span class="sourceLineNo">162</span>    this.snapshotId = NO_SNAPSHOT_ID;<a name="line.162"></a>
+<span class="sourceLineNo">163</span>    oldSnapshot.close();<a name="line.163"></a>
+<span class="sourceLineNo">164</span>  }<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>  @Override<a name="line.166"></a>
+<span class="sourceLineNo">167</span>  public MemStoreSize getSnapshotSize() {<a name="line.167"></a>
+<span class="sourceLineNo">168</span>    return getSnapshotSizing();<a name="line.168"></a>
+<span class="sourceLineNo">169</span>  }<a name="line.169"></a>
+<span class="sourceLineNo">170</span><a name="line.170"></a>
+<span class="sourceLineNo">171</span>  MemStoreSizing getSnapshotSizing() {<a name="line.171"></a>
+<span class="sourceLineNo">172</span>    return new MemStoreSizing(this.snapshot.keySize(),<a name="line.172"></a>
+<span class="sourceLineNo">173</span>        this.snapshot.heapSize(),<a name="line.173"></a>
+<span class="sourceLineNo">174</span>        this.snapshot.offHeapSize());<a name="line.174"></a>
+<span class="sourceLineNo">175</span>  }<a name="line.175"></a>
+<span class="sourceLineNo">176</span><a name="line.176"></a>
+<span class="sourceLineNo">177</span>  @Override<a name="line.177"></a>
+<span class="sourceLineNo">178</span>  public String toString() {<a name="line.178"></a>
+<span class="sourceLineNo">179</span>    StringBuilder buf = new StringBuilder();<a name="line.179"></a>
+<span class="sourceLineNo">180</span>    int i = 1;<a name="line.180"></a>
+<span class="sourceLineNo">181</span>    try {<a name="line.181"></a>
+<span class="sourceLineNo">182</span>      for (Segment segment : getSegments()) {<a name="line.182"></a>
+<span class="sourceLineNo">183</span>        buf.append("Segment (" + i + ") " + segment.toString() + "; ");<a name="line.183"></a>
+<span class="sourceLineNo">184</span>        i++;<a name="line.184"></a>
+<span class="sourceLineNo">185</span>      }<a name="line.185"></a>
+<span class="sourceLineNo">186</span>    } catch (IOException e){<a name="line.186"></a>
+<span class="sourceLineNo">187</span>      return e.toString();<a name="line.187"></a>
+<span class="sourceLineNo">188</span>    }<a name="line.188"></a>
+<span class="sourceLineNo">189</span>    return buf.toString();<a name="line.189"></a>
+<span class="sourceLineNo">190</span>  }<a name="line.190"></a>
+<span class="sourceLineNo">191</span><a name="line.191"></a>
+<span class="sourceLineNo">192</span>  protected Configuration getConfiguration() {<a name="line.192"></a>
+<span class="sourceLineNo">193</span>    return conf;<a name="line.193"></a>
+<span class="sourceLineNo">194</span>  }<a name="line.194"></a>
+<span class="sourceLineNo">195</span><a name="line.195"></a>
+<span class="sourceLineNo">196</span>  protected void dump(Logger log) {<a name="line.196"></a>
+<span class="sourceLineNo">197</span>    active.dump(log);<a name="line.197"></a>
+<span class="sourceLineNo">198</span>    snapshot.dump(log);<a name="line.198"></a>
+<span class="sourceLineNo">199</span>  }<a name="line.199"></a>
+<span class="sourceLineNo">200</span><a name="line.200"></a>
+<span class="sourceLineNo">201</span><a name="line.201"></a>
+<span class="sourceLineNo">202</span>  /*<a name="line.202"></a>
+<span class="sourceLineNo">203</span>   * Inserts the specified Cell into MemStore and deletes any existing<a name="line.203"></a>
+<span class="sourceLineNo">204</span>   * versions of the same row/family/qualifier as the specified Cell.<a name="line.204"></a>
+<span class="sourceLineNo">205</span>   * &lt;p&gt;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>   * First, the specified Cell is inserted into the Memstore.<a name="line.206"></a>
 <span class="sourceLineNo">207</span>   * &lt;p&gt;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>   * First, the specified Cell is inserted into the Memstore.<a name="line.208"></a>
-<span class="sourceLineNo">209</span>   * &lt;p&gt;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>   * If there are any existing Cell in this MemStore with the same row,<a name="line.210"></a>
-<span class="sourceLineNo">211</span>   * family, and qualifier, they are removed.<a name="line.211"></a>
-<span class="sourceLineNo">212</span>   * &lt;p&gt;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>   * Callers must hold the read lock.<a name="line.213"></a>
-<span class="sourceLineNo">214</span>   *<a name="line.214"></a>
-<span class="sourceLineNo">215</span>   * @param cell the cell to be updated<a name="line.215"></a>
-<span class="sourceLineNo">216</span>   * @param readpoint readpoint below which we can safely remove duplicate KVs<a name="line.216"></a>
-<span class="sourceLineNo">217</span>   * @param memstoreSize<a name="line.217"></a>
-<span class="sourceLineNo">218</span>   */<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) {<a name="line.219"></a>
-<span class="sourceLineNo">220</span>    // Add the Cell to the MemStore<a name="line.220"></a>
-<span class="sourceLineNo">221</span>    // Use the internalAdd method here since we (a) already have a lock<a name="line.221"></a>
-<span class="sourceLineNo">222</span>    // and (b) cannot safely use the MSLAB here without potentially<a name="line.222"></a>
-<span class="sourceLineNo">223</span>    // hitting OOME - see TestMemStore.testUpsertMSLAB for a<a name="line.223"></a>
-<span class="sourceLineNo">224</span>    // test that triggers the pathological case if we don't avoid MSLAB<a name="line.224"></a>
-<span class="sourceLineNo">225</span>    // here.<a name="line.225"></a>
-<span class="sourceLineNo">226</span>    // This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). We<a name="line.226"></a>
-<span class="sourceLineNo">227</span>    // must do below deep copy. Or else we will keep referring to the bigger chunk of memory and<a name="line.227"></a>
-<span class="sourceLineNo">228</span>    // prevent it from getting GCed.<a name="line.228"></a>
-<span class="sourceLineNo">229</span>    cell = deepCopyIfNeeded(cell);<a name="line.229"></a>
-<span class="sourceLineNo">230</span>    this.active.upsert(cell, readpoint, memstoreSizing);<a name="line.230"></a>
-<span class="sourceLineNo">231</span>    setOldestEditTimeToNow();<a name="line.231"></a>
-<span class="sourceLineNo">232</span>    checkActiveSize();<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  }<a name="line.233"></a>
-<span class="sourceLineNo">234</span><a name="line.234"></a>
-<span class="sourceLineNo">235</span>  /*<a name="line.235"></a>
-<span class="sourceLineNo">236</span>   * @param a<a name="line.236"></a>
-<span class="sourceLineNo">237</span>   * @param b<a name="line.237"></a>
-<span class="sourceLineNo">238</span>   * @return Return lowest of a or b or null if both a and b are null<a name="line.238"></a>
-<span class="sourceLineNo">239</span>   */<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  protected Cell getLowest(final Cell a, final Cell b) {<a name="line.240"></a>
-<span class="sourceLineNo">241</span>    if (a == null) {<a name="line.241"></a>
-<span class="sourceLineNo">242</span>      return b;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>    }<a name="line.243"></a>
-<span class="sourceLineNo">244</span>    if (b == null) {<a name="line.244"></a>
-<span class="sourceLineNo">245</span>      return a;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>    }<a name="line.246"></a>
-<span class="sourceLineNo">247</span>    return comparator.compareRows(a, b) &lt;= 0? a: b;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  }<a name="line.248"></a>
-<span class="sourceLineNo">249</span><a name="line.249"></a>
-<span class="sourceLineNo">250</span>  /*<a name="line.250"></a>
-<span class="sourceLineNo">251</span>   * @param key Find row that follows this one.  If null, return first.<a name="line.251"></a>
-<span class="sourceLineNo">252</span>   * @param set Set to look in for a row beyond &lt;code&gt;row&lt;/code&gt;.<a name="line.252"></a>
-<span class="sourceLineNo">253</span>   * @return Next row or null if none found.  If one found, will be a new<a name="line.253"></a>
-<span class="sourceLineNo">254</span>   * KeyValue -- can be destroyed by subsequent calls to this method.<a name="line.254"></a>
-<span class="sourceLineNo">255</span>   */<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  protected Cell getNextRow(final Cell key,<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      final NavigableSet&lt;Cell&gt; set) {<a name="line.257"></a>
-<span class="sourceLineNo">258</span>    Cell result = null;<a name="line.258"></a>
-<span class="sourceLineNo">259</span>    SortedSet&lt;Cell&gt; tail = key == null? set: set.tailSet(key);<a name="line.259"></a>
-<span class="sourceLineNo">260</span>    // Iterate until we fall into the next row; i.e. move off current row<a name="line.260"></a>
-<span class="sourceLineNo">261</span>    for (Cell cell: tail) {<a name="line.261"></a>
-<span class="sourceLineNo">262</span>      if (comparator.compareRows(cell, key) &lt;= 0) {<a name="line.262"></a>
-<span class="sourceLineNo">263</span>        continue;<a name="line.263"></a>
-<span class="sourceLineNo">264</span>      }<a name="line.264"></a>
-<span class="sourceLineNo">265</span>      // Note: Not suppressing deletes or expired cells.  Needs to be handled<a name="line.265"></a>
-<span class="sourceLineNo">266</span>      // by higher up functions.<a name="line.266"></a>
-<span class="sourceLineNo">267</span>      result = cell;<a name="line.267"></a>
-<span class="sourceLineNo">268</span>      break;<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    }<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    return result;<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  }<a name="line.271"></a>
-<span class="sourceLineNo">272</span><a name="line.272"></a>
-<span class="sourceLineNo">273</span>  /**<a name="line.273"></a>
-<span class="sourceLineNo">274</span>   * If the segment has a memory allocator the cell is being cloned to this space, and returned;<a name="line.274"></a>
-<span class="sourceLineNo">275</span>   * Otherwise the given cell is returned<a name="line.275"></a>
-<span class="sourceLineNo">276</span>   *<a name="line.276"></a>
-<span class="sourceLineNo">277</span>   * When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.<a name="line.277"></a>
-<span class="sourceLineNo">278</span>   * Since the process of flattening to CellChunkMap assumes that all cells are allocated on MSLAB,<a name="line.278"></a>
-<span class="sourceLineNo">279</span>   * during this process, the input parameter forceCloneOfBigCell is set to 'true'<a name="line.279"></a>
-<span class="sourceLineNo">280</span>   * and the cell is copied into MSLAB.<a name="line.280"></a>
-<span class="sourceLineNo">281</span>   *<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * @param cell the cell to clone<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   * @param forceCloneOfBigCell true only during the process of flattening to CellChunkMap.<a name="line.283"></a>
-<span class="sourceLineNo">284</span>   * @return either the given cell or its clone<a name="line.284"></a>
-<span class="sourceLineNo">285</span>   */<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {<a name="line.286"></a>
-<span class="sourceLineNo">287</span>    return active.maybeCloneWithAllocator(cell, forceCloneOfBigCell);<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  }<a name="line.288"></a>
-<span class="sourceLineNo">289</span><a name="line.289"></a>
-<span class="sourceLineNo">290</span>  /*<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   * Internal version of add() that doesn't clone Cells with the<a name="line.291"></a>
-<span class="sourceLineNo">292</span>   * allocator, and doesn't take the lock.<a name="line.292"></a>
-<span class="sourceLineNo">293</span>   *<a name="line.293"></a>
-<span class="sourceLineNo">294</span>   * Callers should ensure they already have the read lock taken<a name="line.294"></a>
-<span class="sourceLineNo">295</span>   * @param toAdd the cell to add<a name="line.295"></a>
-<span class="sourceLineNo">296</span>   * @param mslabUsed whether using MSLAB<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * @param memstoreSize<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
-<span class="sourceLineNo">299</span>  private void internalAdd(final Cell toAdd, final boolean mslabUsed, MemStoreSizing memstoreSizing) {<a name="line.299"></a>
-<span class="sourceLineNo">300</span>    active.add(toAdd, mslabUsed, memstoreSizing);<a name="line.300"></a>
-<span class="sourceLineNo">301</span>    setOldestEditTimeToNow();<a name="line.301"></a>
-<span class="sourceLineNo">302</span>    checkActiveSize();<a name="line.302"></a>
-<span class="sourceLineNo">303</span>  }<a name="line.303"></a>
-<span class="sourceLineNo">304</span><a name="line.304"></a>
-<span class="sourceLineNo">305</span>  private void setOldestEditTimeToNow() {<a name="line.305"></a>
-<span class="sourceLineNo">306</span>    if (timeOfOldestEdit == Long.MAX_VALUE) {<a name="line.306"></a>
-<span class="sourceLineNo">307</span>      timeOfOldestEdit = EnvironmentEdgeManager.currentTime();<a name="line.307"></a>
-<span class="sourceLineNo">308</span>    }<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  }<a name="line.309"></a>
-<span class="sourceLineNo">310</span><a name="line.310"></a>
-<span class="sourceLineNo">311</span>  /**<a name="line.311"></a>
-<span class="sourceLineNo">312</span>   * @return The total size of cells in this memstore. We will not consider cells in the snapshot<a name="line.312"></a>
-<span class="sourceLineNo">313</span>   */<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  protected abstract long keySize();<a name="line.314"></a>
-<span class="sourceLineNo">315</span><a name="line.315"></a>
-<span class="sourceLineNo">316</span>  /**<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   * @return The total heap size of cells in this memstore. We will not consider cells in the<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   *         snapshot<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   */<a name="line.319"></a>
-<span class="sourceLineNo">320</span>  protected abstract long heapSize();<a name="line.320"></a>
-<span class="sourceLineNo">321</span><a name="line.321"></a>
-<span class="sourceLineNo">322</span>  protected CellComparator getComparator() {<a name="line.322"></a>
-<span class="sourceLineNo">323</span>    return comparator;<a name="line.323"></a>
-<span class="sourceLineNo">324</span>  }<a name="line.324"></a>
-<span class="sourceLineNo">325</span><a name="line.325"></a>
-<span class="sourceLineNo">326</span>  @VisibleForTesting<a name="line.326"></a>
-<span class="sourceLineNo">327</span>  MutableSegment getActive() {<a name="line.327"></a>
-<span class="sourceLineNo">328</span>    return active;<a name="line.328"></a>
-<span class="sourceLineNo">329</span>  }<a name="line.329"></a>
-<span class="sourceLineNo">330</span><a name="line.330"></a>
-<span class="sourceLineNo">331</span>  @VisibleForTesting<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  ImmutableSegment getSnapshot() {<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    return snapshot;<a name="line.333"></a>
-<span class="sourceLineNo">334</span>  }<a name="line.334"></a>
-<span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  /**<a name="line.336"></a>
-<span class="sourceLineNo">337</span>   * Check whether anything need to be done based on the current active set size<a name="line.337"></a>
-<span class="sourceLineNo">338</span>   */<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  protected abstract void checkActiveSize();<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * @return an ordered list of segments from most recent to oldest in memstore<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   */<a name="line.343"></a>
-<span class="sourceLineNo">344</span>  protected abstract List&lt;Segment&gt; getSegments() throws IOException;<a name="line.344"></a>
-<span class="sourceLineNo">345</span><a name="line.345"></a>
-<span class="sourceLineNo">346</span>}<a name="line.346"></a>
+<span class="sourceLineNo">208</span>   * If there are any existing Cell in this MemStore with the same row,<a name="line.208"></a>
+<span class="sourceLineNo">209</span>   * family, and qualifier, they are removed.<a name="line.209"></a>
+<span class="sourceLineNo">210</span>   * &lt;p&gt;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>   * Callers must hold the read lock.<a name="line.211"></a>
+<span class="sourceLineNo">212</span>   *<a name="line.212"></a>
+<span class="sourceLineNo">213</span>   * @param cell the cell to be updated<a name="line.213"></a>
+<span class="sourceLineNo">214</span>   * @param readpoint readpoint below which we can safely remove duplicate KVs<a name="line.214"></a>
+<span class="sourceLineNo">215</span>   * @param memstoreSize<a name="line.215"></a>
+<span class="sourceLineNo">216</span>   */<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) {<a name="line.217"></a>
+<span class="sourceLineNo">218</span>    // Add the Cell to the MemStore<a name="line.218"></a>
+<span class="sourceLineNo">219</span>    // Use the internalAdd method here since we (a) already have a lock<a name="line.219"></a>
+<span class="sourceLineNo">220</span>    // and (b) cannot safely use the MSLAB here without potentially<a name="line.220"></a>
+<span class="sourceLineNo">221</span>    // hitting OOME - see TestMemStore.testUpsertMSLAB for a<a name="line.221"></a>
+<span class="sourceLineNo">222</span>    // test that triggers the pathological case if we don't avoid MSLAB<a name="line.222"></a>
+<span class="sourceLineNo">223</span>    // here.<a name="line.223"></a>
+<span class="sourceLineNo">224</span>    // This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). We<a name="line.224"></a>
+<span class="sourceLineNo">225</span>    // must do below deep copy. Or else we will keep referring to the bigger chunk of memory and<a name="line.225"></a>
+<span class="sourceLineNo">226</span>    // prevent it from getting GCed.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>    cell = deepCopyIfNeeded(cell);<a name="line.227"></a>
+<span class="sourceLineNo">228</span>    this.active.upsert(cell, readpoint, memstoreSizing);<a name="line.228"></a>
+<span class="sourceLineNo">229</span>    setOldestEditTimeToNow();<a name="line.229"></a>
+<span class="sourceLineNo">230</span>    checkActiveSize();<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  }<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>  /*<a name="line.233"></a>
+<span class="sourceLineNo">234</span>   * @param a<a name="line.234"></a>
+<span class="sourceLineNo">235</span>   * @param b<a name="line.235"></a>
+<span class="sourceLineNo">236</span>   * @return Return lowest of a or b or null if both a and b are null<a name="line.236"></a>
+<span class="sourceLineNo">237</span>   */<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  protected Cell getLowest(final Cell a, final Cell b) {<a name="line.238"></a>
+<span class="sourceLineNo">239</span>    if (a == null) {<a name="line.239"></a>
+<span class="sourceLineNo">240</span>      return b;<a name="line.240"></a>
+<span class="sourceLineNo">241</span>    }<a name="line.241"></a>
+<span class="sourceLineNo">242</span>    if (b == null) {<a name="line.242"></a>
+<span class="sourceLineNo">243</span>      return a;<a name="line.243"></a>
+<span class="sourceLineNo">244</span>    }<a name="line.244"></a>
+<span class="sourceLineNo">245</span>    return comparator.compareRows(a, b) &lt;= 0? a: b;<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  }<a name="line.246"></a>
+<span class="sourceLineNo">247</span><a name="line.247"></a>
+<span class="sourceLineNo">248</span>  /*<a name="line.248"></a>
+<span class="sourceLineNo">249</span>   * @param key Find row that follows this one.  If null, return first.<a name="line.249"></a>
+<span class="sourceLineNo">250</span>   * @param set Set to look in for a row beyond &lt;code&gt;row&lt;/code&gt;.<a name="line.250"></a>
+<span class="sourceLineNo">251</span>   * @return Next row or null if none found.  If one found, will be a new<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * KeyValue -- can be destroyed by subsequent calls to this method.<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   */<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  protected Cell getNextRow(final Cell key,<a name="line.254"></a>
+<span class="sourceLineNo">255</span>      final NavigableSet&lt;Cell&gt; set) {<a name="line.255"></a>
+<span class="sourceLineNo">256</span>    Cell result = null;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>    SortedSet&lt;Cell&gt; tail = key == null? set: set.tailSet(key);<a name="line.257"></a>
+<span class="sourceLineNo">258</span>    // Iterate until we fall into the next row; i.e. move off current row<a name="line.258"></a>
+<span class="sourceLineNo">259</span>    for (Cell cell: tail) {<a name="line.259"></a>
+<span class="sourceLineNo">260</span>      if (comparator.compareRows(cell, key) &lt;= 0) {<a name="line.260"></a>
+<span class="sourceLineNo">261</span>        continue;<a name="line.261"></a>
+<span class="sourceLineNo">262</span>      }<a name="line.262"></a>
+<span class="sourceLineNo">263</span>      // Note: Not suppressing deletes or expired cells.  Needs to be handled<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      // by higher up functions.<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      result = cell;<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      break;<a name="line.266"></a>
+<span class="sourceLineNo">267</span>    }<a name="line.267"></a>
+<span class="sourceLineNo">268</span>    return result;<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  }<a name="line.269"></a>
+<span class="sourceLineNo">270</span><a name="line.270"></a>
+<span class="sourceLineNo">271</span>  /**<a name="line.271"></a>
+<span class="sourceLineNo">272</span>   * If the segment has a memory allocator the cell is being cloned to this space, and returned;<a name="line.272"></a>
+<span class="sourceLineNo">273</span>   * Otherwise the given cell is returned<a name="line.273"></a>
+<span class="sourceLineNo">274</span>   *<a name="line.274"></a>
+<span class="sourceLineNo">275</span>   * When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.<a name="line.275"></a>
+<span class="sourceLineNo">276</span>   * Since the process of flattening to CellChunkMap assumes that all cells are allocated on MSLAB,<a name="line.276"></a>
+<span class="sourceLineNo">277</span>   * during this process, the input parameter forceCloneOfBigCell is set to 'true'<a name="line.277"></a>
+<span class="sourceLineNo">278</span>   * and the cell is copied into MSLAB.<a name="line.278"></a>
+<span class="sourceLineNo">279</span>   *<a name="line.279"></a>
+<span class="sourceLineNo">280</span>   * @param cell the cell to clone<a name="line.280"></a>
+<span class="sourceLineNo">281</span>   * @param forceCloneOfBigCell true only during the process of flattening to CellChunkMap.<a name="line.281"></a>
+<span class="sourceLineNo">282</span>   * @return either the given cell or its clone<a name="line.282"></a>
+<span class="sourceLineNo">283</span>   */<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {<a name="line.284"></a>
+<span class="sourceLineNo">285</span>    return active.maybeCloneWithAllocator(cell, forceCloneOfBigCell);<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  }<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * Internal version of add() that doesn't clone Cells with the<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   * allocator, and doesn't take the lock.<a name="line.290"></a>
+<span class="sourceLineNo">291</span>   *<a name="line.291"></a>
+<span class="sourceLineNo">292</span>   * Callers should ensure they already have the read lock taken<a name="line.292"></a>
+<span class="sourceLineNo">293</span>   * @param toAdd the cell to add<a name="line.293"></a>
+<span class="sourceLineNo">294</span>   * @param mslabUsed whether using MSLAB<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * @param memstoreSize<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   */<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private void internalAdd(final Cell toAdd, final boolean mslabUsed, MemStoreSizing memstoreSizing) {<a name="line.297"></a>
+<span class="sourceLineNo">298</span>    active.add(toAdd, mslabUsed, memstoreSizing);<a name="line.298"></a>
+<span class="sourceLineNo">299</span>    setOldestEditTimeToNow();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>    checkActiveSize();<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  }<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private void setOldestEditTimeToNow() {<a name="line.303"></a>
+<span class="sourceLineNo">304</span>    if (timeOfOldestEdit == Long.MAX_VALUE) {<a name="line.304"></a>
+<span class="sourceLineNo">305</span>      timeOfOldestEdit = EnvironmentEdgeManager.currentTime();<a name="line.305"></a>
+<span class="sourceLineNo">306</span>    }<a name="line.306"></a>
+<span class="sourceLineNo">307</span>  }<a name="line.307"></a>
+<span class="sourceLineNo">308</span><a name="line.308"></a>
+<span class="sourceLineNo">309</span>  /**<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * @return The total size of cells in this memstore. We will not consider cells in the snapshot<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   */<a name="line.311"></a>
+<span class="sourceLineNo">312</span>  protected abstract long keySize();<a name="line.312"></a>
+<span class="sourceLineNo">313</span><a name="line.313"></a>
+<span class="sourceLineNo">314</span>  /**<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * @return The total heap size of cells in this memstore. We will not consider cells in the<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   *         snapshot<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  protected abstract long heapSize();<a name="line.318"></a>
+<span class="sourceLineNo">319</span><a name="line.319"></a>
+<span class="sourceLineNo">320</span>  protected CellComparator getComparator() {<a name="line.320"></a>
+<span class="sourceLineNo">321</span>    return comparator;<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  }<a name="line.322"></a>
+<span class="sourceLineNo">323</span><a name="line.323"></a>
+<span class="sourceLineNo">324</span>  @VisibleForTesting<a name="line.324"></a>
+<span class="sourceLineNo">325</span>  MutableSegment getActive() {<a name="line.325"></a>
+<span class="sourceLineNo">326</span>    return active;<a name="line.326"></a>
+<span class="sourceLineNo">327</span>  }<a name="line.327"></a>
+<span class="sourceLineNo">328</span><a name="line.328"></a>
+<span class="sourceLineNo">329</span>  @VisibleForTesting<a name="line.329"></a>
+<span class="sourceLineNo">330</span>  ImmutableSegment getSnapshot() {<a name="line.330"></a>
+<span class="sourceLineNo">331</span>    return snapshot;<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  }<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * Check whether anything need to be done based on the current active set size<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  protected abstract void checkActiveSize();<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  /**<a name="line.339"></a>
+<span class="sourceLineNo">340</span>   * @return an ordered list of segments from most recent to oldest in memstore<a name="line.340"></a>
+<span class="sourceLineNo">341</span>   */<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  protected abstract List&lt;Segment&gt; getSegments() throws IOException;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>}<a name="line.344"></a>
 
 
 


[09/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.

<TRUNCATED>

[36/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index 9b58b53..5b0e58f 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -274,38 +274,32 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getNextIndexedKey--">getNextIndexedKey</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i5" class="rowColor">
-<td class="colFirst"><code>long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getScannerOrder--">getScannerOrder</a></span>()</code>
-<div class="block">Get the order of this KeyValueScanner.</div>
-</td>
-</tr>
-<tr id="i6" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#next--">next</a></span>()</code>
 <div class="block">Return the next Cell in this scanner, iterating the scanner</div>
 </td>
 </tr>
-<tr id="i7" class="rowColor">
+<tr id="i6" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#next-java.util.List-org.apache.hadoop.hbase.regionserver.ScannerContext-">next</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;result,
     <a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.html" title="class in org.apache.hadoop.hbase.regionserver">ScannerContext</a>&nbsp;scannerContext)</code>
 <div class="block">Gets the next row of keys from the top-most scanner.</div>
 </td>
 </tr>
-<tr id="i8" class="altColor">
+<tr id="i7" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#peek--">peek</a></span>()</code>
 <div class="block">Look at the next Cell in this scanner, but do not iterate scanner.</div>
 </td>
 </tr>
-<tr id="i9" class="rowColor">
+<tr id="i8" class="altColor">
 <td class="colFirst"><code>protected <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#pollRealKV--">pollRealKV</a></span>()</code>
 <div class="block">Fetches the top sub-scanner from the priority queue, ensuring that a real
  seek has been done on it.</div>
 </td>
 </tr>
-<tr id="i10" class="altColor">
+<tr id="i9" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#requestSeek-org.apache.hadoop.hbase.Cell-boolean-boolean-">requestSeek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;key,
            boolean&nbsp;forward,
@@ -315,20 +309,20 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
  row/column combination specified by the kv parameter.</div>
 </td>
 </tr>
-<tr id="i11" class="rowColor">
+<tr id="i10" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#reseek-org.apache.hadoop.hbase.Cell-">reseek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;seekKey)</code>
 <div class="block">This function is identical to the <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#seek-org.apache.hadoop.hbase.Cell-"><code>seek(Cell)</code></a> function except
  that scanner.seek(seekKey) is changed to scanner.reseek(seekKey).</div>
 </td>
 </tr>
-<tr id="i12" class="altColor">
+<tr id="i11" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#seek-org.apache.hadoop.hbase.Cell-">seek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;seekKey)</code>
 <div class="block">Seeks all scanners at or below the specified seek key.</div>
 </td>
 </tr>
-<tr id="i13" class="rowColor">
+<tr id="i12" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#shipped--">shipped</a></span>()</code>
 <div class="block">Called after a batch of rows scanned and set to be returned to client.</div>
@@ -361,7 +355,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#backwardSeek-org.apache.hadoop.hbase.Cell-">backwardSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToLastRow--">seekToLastRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToPreviousRow-org.apache.hadoop.hbase.Cell-">seekToPreviousRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.had
 oop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#backwardSeek-org.apache.hadoop.hbase.Cell-">backwardSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToLastRow--">seekToLastRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToPreviousRow-org.apache.hadoop.hbase.Cell-">seekToPrevious
 Row</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">
@@ -719,34 +713,13 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 </dl>
 </li>
 </ul>
-<a name="getScannerOrder--">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>getScannerOrder</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.422">getScannerOrder</a>()</pre>
-<div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">KeyValueScanner</a></code></span></div>
-<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and
- MemStoreScanners (other scanners simply return 0). This is required for comparing multiple
- files to find out which one has the latest data. StoreFileScanners are ordered from 0
- (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always
- contains freshest data.</div>
-<dl>
-<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
-<dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
-<dt><span class="seeLabel">See Also:</span></dt>
-<dd><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--"><code>KeyValueScanner.getScannerOrder()</code></a></dd>
-</dl>
-</li>
-</ul>
 <a name="getCurrentForTesting--">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>getCurrentForTesting</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.427">getCurrentForTesting</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.420">getCurrentForTesting</a>()</pre>
 </li>
 </ul>
 <a name="getNextIndexedKey--">
@@ -755,7 +728,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getNextIndexedKey</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.432">getNextIndexedKey</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.425">getNextIndexedKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getNextIndexedKey--">getNextIndexedKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
@@ -774,7 +747,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockListLast">
 <li class="blockList">
 <h4>shipped</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.438">shipped</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#line.431">shipped</a>()
              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Shipper.html#shipped--">Shipper</a></code></span></div>
 <div class="block">Called after a batch of rows scanned and set to be returned to client. Any in between cleanup

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
index 50b2fca..64c839f 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
@@ -18,8 +18,8 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -148,7 +148,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Shipper.htm
 </a>
 <h3>Method Summary</h3>
 <table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
-<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd">&nbsp;</span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd">&nbsp;</span></span><span id="t3" class="tableTab"><span><a href="javascript:show(4);">Abstract Methods</a></span><span class="tabEnd">&nbsp;</span></span></caption>
+<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd">&nbsp;</span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd">&nbsp;</span></span><span id="t3" class="tableTab"><span><a href="javascript:show(4);">Abstract Methods</a></span><span class="tabEnd">&nbsp;</span></span><span id="t5" class="tableTab"><span><a href="javascript:show(16);">Default Methods</a></span><span class="tabEnd">&nbsp;</span></span></caption>
 <tr>
 <th class="colFirst" scope="col">Modifier and Type</th>
 <th class="colLast" scope="col">Method and Description</th>
@@ -185,7 +185,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Shipper.htm
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getNextIndexedKey--">getNextIndexedKey</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i5" class="rowColor">
-<td class="colFirst"><code>long</code></td>
+<td class="colFirst"><code>default long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></span>()</code>
 <div class="block">Get the order of this KeyValueScanner.</div>
 </td>
@@ -379,12 +379,10 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Shipper.htm
 <ul class="blockList">
 <li class="blockList">
 <h4>getScannerOrder</h4>
-<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#line.82">getScannerOrder</a>()</pre>
-<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and
- MemStoreScanners (other scanners simply return 0). This is required for comparing multiple
- files to find out which one has the latest data. StoreFileScanners are ordered from 0
- (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always
- contains freshest data.</div>
+<pre>default&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#line.80">getScannerOrder</a>()</pre>
+<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners.
+ This is required for comparing multiple files to find out which one has the latest
+ data. StoreFileScanners are ordered from 0 (oldest) to newest in increasing order.</div>
 </li>
 </ul>
 <a name="close--">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
index 719ab9f..0d330cd 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
@@ -343,7 +343,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>hasNext</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.70">hasNext</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.67">hasNext</a>()</pre>
 </li>
 </ul>
 <a name="next--">
@@ -352,7 +352,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>next</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.79">next</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.76">next</a>()</pre>
 </li>
 </ul>
 <a name="close--">
@@ -361,7 +361,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>close</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.87">close</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.84">close</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.html#close--">close</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSegmentsIterator</a></code></dd>
@@ -374,7 +374,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>remove</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.98">remove</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.95">remove</a>()</pre>
 </li>
 </ul>
 <a name="createScanner-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">
@@ -383,7 +383,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>createScanner</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/InternalScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">InternalScanner</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.106">createScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html" title="class in org.apache.hadoop.hbase.regionserver">HStore</a>&nbsp;store,
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/InternalScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">InternalScanner</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.103">createScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html" title="class in org.apache.hadoop.hbase.regionserver">HStore</a>&nbsp;store,
                                       <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)
                                throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Creates the scanner for compacting the pipeline.</div>
@@ -401,7 +401,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockListLast">
 <li class="blockList">
 <h4>refillKVS</h4>
-<pre>private&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.144">refillKVS</a>()</pre>
+<pre>private&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html#line.141">refillKVS</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
index fc2fb5b..6acb102 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
@@ -304,7 +304,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>hasNext</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.56">hasNext</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.55">hasNext</a>()</pre>
 </li>
 </ul>
 <a name="next--">
@@ -313,7 +313,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>next</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.68">next</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.67">next</a>()</pre>
 </li>
 </ul>
 <a name="close--">
@@ -322,7 +322,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockList">
 <li class="blockList">
 <h4>close</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.80">close</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.79">close</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.html#close--">close</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSegmentsIterator</a></code></dd>
@@ -335,7 +335,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSeg
 <ul class="blockListLast">
 <li class="blockList">
 <h4>remove</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.98">remove</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html#line.97">remove</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/MobStoreScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MobStoreScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MobStoreScanner.html
index 0c4b89e..60f4eac 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MobStoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MobStoreScanner.html
@@ -230,7 +230,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanne
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html" title="class in org.apache.hadoop.hbase.regionserver">StoreScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkFlushed--">checkFlushed</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkScanOrder-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.CellComparator-">checkScanOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#enableLazySeekGlobally-boolean-">enableLazySeekGlobally</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getAllScannersForTesting--">getAllScannersForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getEstimatedNumberOfKvsScanned--">getEstimatedNumberOfKvsScanned</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../..
 /../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getScannerOrder--">getScannerOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reseek-org.apache.hadoop.hbase.Cell-">reseek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#resetKVHeap-java.util.List-org.apache.hadoop.hbase.CellComparator-">resetKVHeap</a>, <a href="../../../../../org/apache/hadoop/hbase/regionser
 ver/StoreScanner.html#seek-org.apache.hadoop.hbase.Cell-">seek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekAsDirection-org.apache.hadoop.hbase.Cell-">seekAsDirection</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekScanners-java.util.List-org.apache.hadoop.hbase.Cell-boolean-boolean-">seekScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekToNextRow-org.apache.hadoop.hbase.Cell-">seekToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hadoop.hbase.Cell-">trySkipToNextColumn</a>, <a href="../../../../../or
 g/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Cell-">trySkipToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToStreamRead--">trySwitchToStreamRead</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkFlushed--">checkFlushed</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkScanOrder-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.CellComparator-">checkScanOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#enableLazySeekGlobally-boolean-">enableLazySeekGlobally</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getAllScannersForTesting--">getAllScannersForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getEstimatedNumberOfKvsScanned--">getEstimatedNumberOfKvsScanned</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../..
 /../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reseek-org.apache.hadoop.hbase.Cell-">reseek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#resetKVHeap-java.util.List-org.apache.hadoop.hbase.CellComparator-">resetKVHeap</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seek-org.apache.hadoop.hbase.Cell-">seek</a>, <a href="../../../../../org/apache/hadoop/hbase/reg
 ionserver/StoreScanner.html#seekAsDirection-org.apache.hadoop.hbase.Cell-">seekAsDirection</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekScanners-java.util.List-org.apache.hadoop.hbase.Cell-boolean-boolean-">seekScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekToNextRow-org.apache.hadoop.hbase.Cell-">seekToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hadoop.hbase.Cell-">trySkipToNextColumn</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Cell-">trySkipToNextRow</
 a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToStreamRead--">trySwitchToStreamRead</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">
@@ -258,7 +258,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanne
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#backwardSeek-org.apache.hadoop.hbase.Cell-">backwardSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#requestSeek-org.apache.hadoop.hbase.Cell-boolean-boolean-">requestSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToLastRow--">seekToLastRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToPreviousRow-org.apach
 e.hadoop.hbase.Cell-">seekToPreviousRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#backwardSeek-org.apache.hadoop.hbase.Cell-">backwardSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#requestSeek-org.apache.hadoop.hbase.Cell-boolean-boolean-">requestSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToLastRow--">seekTo
 LastRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToPreviousRow-org.apache.hadoop.hbase.Cell-">seekToPreviousRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
index fb1e8df..24feed5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
@@ -213,7 +213,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.htm
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache
 .hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apa
 che/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.h
 base.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#toString--">toString</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--
 ">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/reg
 ionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/a
 pache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#toString--">toString</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
index 034b179..d6b7415 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
@@ -281,7 +281,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHea
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html" title="class in org.apache.hadoop.hbase.regionserver">KeyValueHeap</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getCurrentForTesting--">getCurrentForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getHeap--">getHeap</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getScannerOrder--">getScannerOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#next-java.util.List-org.apache.hadoop.hbase.regionserver.ScannerContext-">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#pollRealKV--">pollRealKV</a>, <a href="../../../../../org/apache/hadoop/hbase/r
 egionserver/KeyValueHeap.html#shipped--">shipped</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getCurrentForTesting--">getCurrentForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getHeap--">getHeap</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#next-java.util.List-org.apache.hadoop.hbase.regionserver.ScannerContext-">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#pollRealKV--">pollRealKV</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHeap.html#shipped--">shipped</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner">
@@ -302,7 +302,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueHea
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
index 11c6932..cd705d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
@@ -242,7 +242,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ReversedSto
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html" title="class in org.apache.hadoop.hbase.regionserver">StoreScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkFlushed--">checkFlushed</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#enableLazySeekGlobally-boolean-">enableLazySeekGlobally</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getAllScannersForTesting--">getAllScannersForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getEstimatedNumberOfKvsScanned--">getEstimatedNumberOfKvsScanned</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getScannerOrder--">getScannerOrder</a>
 , <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hadoop.hbase.Cell-">trySkipToNextColumn</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Ce
 ll-">trySkipToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToStreamRead--">trySwitchToStreamRead</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkFlushed--">checkFlushed</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#enableLazySeekGlobally-boolean-">enableLazySeekGlobally</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getAllScannersForTesting--">getAllScannersForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getEstimatedNumberOfKvsScanned--">getEstimatedNumberOfKvsScanned</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a>, 
 <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hadoop.hbase.Cell-">trySkipToNextColumn</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Cell-">trySkipToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToSt
 reamRead--">trySwitchToStreamRead</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.html
index 0e1df08..97c4715 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.html
@@ -276,7 +276,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html" title="class in org.apache.hadoop.hbase.regionserver">StoreScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkFlushed--">checkFlushed</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#enableLazySeekGlobally-boolean-">enableLazySeekGlobally</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getAllScannersForTesting--">getAllScannersForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getEstimatedNumberOfKvsScanned--">getEstimatedNumberOfKvsScanned</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getScannerOrder--">getScannerOrder</a>
 , <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next-java.util.List-org.apache.hadoop.hbase.regionserver.ScannerContext-">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hado
 op.hbase.Cell-">trySkipToNextColumn</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Cell-">trySkipToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToStreamRead--">trySwitchToStreamRead</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#checkFlushed--">checkFlushed</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#enableLazySeekGlobally-boolean-">enableLazySeekGlobally</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getAllScannersForTesting--">getAllScannersForTesting</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getEstimatedNumberOfKvsScanned--">getEstimatedNumberOfKvsScanned</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getNextIndexedKey--">getNextIndexedKey</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a>, 
 <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next-java.util.List-org.apache.hadoop.hbase.regionserver.ScannerContext-">next</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hadoop.hbase.Cell-">trySkipToNextColumn</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html
 #trySkipToNextRow-org.apache.hadoop.hbase.Cell-">trySkipToNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToStreamRead--">trySwitchToStreamRead</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner">


[04/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">41

<TRUNCATED>

[19/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed

<TRUNCATED>

[32/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index 4164818..1d97ed8 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.372">HBaseFsck.FileLockCallable</a>
+<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.379">HBaseFsck.FileLockCallable</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;org.apache.hadoop.fs.FSDataOutputStream&gt;</pre>
 </li>
@@ -212,7 +212,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>retryCounter</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.373">retryCounter</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.380">retryCounter</a></pre>
 </li>
 </ul>
 </li>
@@ -229,7 +229,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>FileLockCallable</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.375">FileLockCallable</a>(<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</pre>
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.382">FileLockCallable</a>(<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</pre>
 </li>
 </ul>
 </li>
@@ -246,7 +246,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.379">call</a>()
+<pre>public&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.386">call</a>()
                                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -262,7 +262,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>createFileWithRetries</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.400">createFileWithRetries</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
+<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.407">createFileWithRetries</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                                                                       org.apache.hadoop.fs.Path&nbsp;hbckLockFilePath,
                                                                       org.apache.hadoop.fs.permission.FsPermission&nbsp;defaultPerms)
                                                                throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index a4b5012..e5088ab 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4857">HBaseFsck.HBaseFsckTool</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4878">HBaseFsck.HBaseFsckTool</a>
 extends org.apache.hadoop.conf.Configured
 implements org.apache.hadoop.util.Tool</pre>
 <div class="block">This is a Tool wrapper that gathers -Dxxx=yyy configuration settings from the command line.</div>
@@ -207,7 +207,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HBaseFsckTool</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4858">HBaseFsckTool</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4879">HBaseFsckTool</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
 </li>
 </ul>
 </li>
@@ -224,7 +224,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>run</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4860">run</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4881">run</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index 621297c..e1c2cca 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3849">HBaseFsck.HbckInfo</a>
+<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3856">HBaseFsck.HbckInfo</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" title="interface in org.apache.hadoop.hbase.util">KeyRange</a></pre>
 <div class="block">Maintain information about a particular region.  It gathers information
@@ -305,7 +305,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>metaEntry</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3850">metaEntry</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3857">metaEntry</a></pre>
 </li>
 </ul>
 <a name="hdfsEntry">
@@ -314,7 +314,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsEntry</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HdfsEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3851">hdfsEntry</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HdfsEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3858">hdfsEntry</a></pre>
 </li>
 </ul>
 <a name="deployedEntries">
@@ -323,7 +323,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedEntries</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.OnlineEntry</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3852">deployedEntries</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.OnlineEntry</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3859">deployedEntries</a></pre>
 </li>
 </ul>
 <a name="deployedOn">
@@ -332,7 +332,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedOn</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3853">deployedOn</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3860">deployedOn</a></pre>
 </li>
 </ul>
 <a name="skipChecks">
@@ -341,7 +341,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>skipChecks</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3854">skipChecks</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3861">skipChecks</a></pre>
 </li>
 </ul>
 <a name="isMerged">
@@ -350,7 +350,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>isMerged</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3855">isMerged</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3862">isMerged</a></pre>
 </li>
 </ul>
 <a name="deployedReplicaId">
@@ -359,7 +359,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedReplicaId</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3856">deployedReplicaId</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3863">deployedReplicaId</a></pre>
 </li>
 </ul>
 <a name="primaryHRIForDeployedReplica">
@@ -368,7 +368,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockListLast">
 <li class="blockList">
 <h4>primaryHRIForDeployedReplica</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3857">primaryHRIForDeployedReplica</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3864">primaryHRIForDeployedReplica</a></pre>
 </li>
 </ul>
 </li>
@@ -385,7 +385,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HbckInfo</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3859">HbckInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a>&nbsp;metaEntry)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3866">HbckInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a>&nbsp;metaEntry)</pre>
 </li>
 </ul>
 </li>
@@ -402,7 +402,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getReplicaId</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3863">getReplicaId</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3870">getReplicaId</a>()</pre>
 </li>
 </ul>
 <a name="addServer-org.apache.hadoop.hbase.client.RegionInfo-org.apache.hadoop.hbase.ServerName-">
@@ -411,7 +411,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>addServer</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3867">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3874">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri,
                       <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;server)</pre>
 </li>
 </ul>
@@ -421,7 +421,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3880">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3887">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>
@@ -434,7 +434,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getStartKey</h4>
-<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3892">getStartKey</a>()</pre>
+<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3899">getStartKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html#getStartKey--">getStartKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" title="interface in org.apache.hadoop.hbase.util">KeyRange</a></code></dd>
@@ -447,7 +447,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getEndKey</h4>
-<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3904">getEndKey</a>()</pre>
+<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3911">getEndKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html#getEndKey--">getEndKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" title="interface in org.apache.hadoop.hbase.util">KeyRange</a></code></dd>
@@ -460,7 +460,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getTableName</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3915">getTableName</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3922">getTableName</a>()</pre>
 </li>
 </ul>
 <a name="getRegionNameAsString--">
@@ -469,7 +469,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionNameAsString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3932">getRegionNameAsString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3939">getRegionNameAsString</a>()</pre>
 </li>
 </ul>
 <a name="getRegionName--">
@@ -478,7 +478,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionName</h4>
-<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3948">getRegionName</a>()</pre>
+<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3955">getRegionName</a>()</pre>
 </li>
 </ul>
 <a name="getPrimaryHRIForDeployedReplica--">
@@ -487,7 +487,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getPrimaryHRIForDeployedReplica</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3962">getPrimaryHRIForDeployedReplica</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3969">getPrimaryHRIForDeployedReplica</a>()</pre>
 </li>
 </ul>
 <a name="getHdfsRegionDir--">
@@ -496,7 +496,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getHdfsRegionDir</h4>
-<pre>org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3966">getHdfsRegionDir</a>()</pre>
+<pre>org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3973">getHdfsRegionDir</a>()</pre>
 </li>
 </ul>
 <a name="containsOnlyHdfsEdits--">
@@ -505,7 +505,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>containsOnlyHdfsEdits</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3973">containsOnlyHdfsEdits</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3980">containsOnlyHdfsEdits</a>()</pre>
 </li>
 </ul>
 <a name="isHdfsRegioninfoPresent--">
@@ -514,7 +514,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>isHdfsRegioninfoPresent</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3980">isHdfsRegioninfoPresent</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3987">isHdfsRegioninfoPresent</a>()</pre>
 </li>
 </ul>
 <a name="getModTime--">
@@ -523,7 +523,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getModTime</h4>
-<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3987">getModTime</a>()</pre>
+<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3994">getModTime</a>()</pre>
 </li>
 </ul>
 <a name="getHdfsHRI--">
@@ -532,7 +532,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getHdfsHRI</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3994">getHdfsHRI</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4001">getHdfsHRI</a>()</pre>
 </li>
 </ul>
 <a name="setSkipChecks-boolean-">
@@ -541,7 +541,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>setSkipChecks</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4001">setSkipChecks</a>(boolean&nbsp;skipChecks)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4008">setSkipChecks</a>(boolean&nbsp;skipChecks)</pre>
 </li>
 </ul>
 <a name="isSkipChecks--">
@@ -550,7 +550,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>isSkipChecks</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4005">isSkipChecks</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4012">isSkipChecks</a>()</pre>
 </li>
 </ul>
 <a name="setMerged-boolean-">
@@ -559,7 +559,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>setMerged</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4009">setMerged</a>(boolean&nbsp;isMerged)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4016">setMerged</a>(boolean&nbsp;isMerged)</pre>
 </li>
 </ul>
 <a name="isMerged--">
@@ -568,7 +568,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockListLast">
 <li class="blockList">
 <h4>isMerged</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4013">isMerged</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4020">isMerged</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index 1b4df78..14e3f9e 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -107,7 +107,7 @@
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3824">HBaseFsck.HdfsEntry</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3831">HBaseFsck.HdfsEntry</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 <div class="block">Stores the regioninfo entries from HDFS</div>
 </li>
@@ -201,7 +201,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hri</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3825">hri</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3832">hri</a></pre>
 </li>
 </ul>
 <a name="hdfsRegionDir">
@@ -210,7 +210,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsRegionDir</h4>
-<pre>org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3826">hdfsRegionDir</a></pre>
+<pre>org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3833">hdfsRegionDir</a></pre>
 </li>
 </ul>
 <a name="hdfsRegionDirModTime">
@@ -219,7 +219,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsRegionDirModTime</h4>
-<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3827">hdfsRegionDirModTime</a></pre>
+<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3834">hdfsRegionDirModTime</a></pre>
 </li>
 </ul>
 <a name="hdfsRegioninfoFilePresent">
@@ -228,7 +228,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsRegioninfoFilePresent</h4>
-<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3828">hdfsRegioninfoFilePresent</a></pre>
+<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3835">hdfsRegioninfoFilePresent</a></pre>
 </li>
 </ul>
 <a name="hdfsOnlyEdits">
@@ -237,7 +237,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hdfsOnlyEdits</h4>
-<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3829">hdfsOnlyEdits</a></pre>
+<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3836">hdfsOnlyEdits</a></pre>
 </li>
 </ul>
 </li>
@@ -254,7 +254,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HdfsEntry</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3824">HdfsEntry</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3831">HdfsEntry</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index 6fc8186..c8fba8d 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3773">HBaseFsck.MetaEntry</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3780">HBaseFsck.MetaEntry</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</a></pre>
 <div class="block">Stores the regioninfo entries scanned from META</div>
 </li>
@@ -264,7 +264,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>regionServer</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3774">regionServer</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3781">regionServer</a></pre>
 </li>
 </ul>
 <a name="modTime">
@@ -273,7 +273,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>modTime</h4>
-<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3775">modTime</a></pre>
+<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3782">modTime</a></pre>
 </li>
 </ul>
 <a name="splitA">
@@ -282,7 +282,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>splitA</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3776">splitA</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3783">splitA</a></pre>
 </li>
 </ul>
 <a name="splitB">
@@ -291,7 +291,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockListLast">
 <li class="blockList">
 <h4>splitB</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3776">splitB</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3783">splitB</a></pre>
 </li>
 </ul>
 </li>
@@ -308,7 +308,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>MetaEntry</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3778">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3785">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
                  <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;regionServer,
                  long&nbsp;modTime)</pre>
 </li>
@@ -319,7 +319,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockListLast">
 <li class="blockList">
 <h4>MetaEntry</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3782">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3789">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
                  <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;regionServer,
                  long&nbsp;modTime,
                  <a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;splitA,
@@ -340,7 +340,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>equals</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3792">equals</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>&nbsp;o)</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3799">equals</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>&nbsp;o)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html#equals-java.lang.Object-">equals</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</a></code></dd>
@@ -355,7 +355,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hashCode</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3806">hashCode</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3813">hashCode</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html#hashCode--">hashCode</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index 815f323..47d570b 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3835">HBaseFsck.OnlineEntry</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3842">HBaseFsck.OnlineEntry</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 <div class="block">Stores the regioninfo retrieved from Online region servers.</div>
 </li>
@@ -206,7 +206,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hri</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3836">hri</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3843">hri</a></pre>
 </li>
 </ul>
 <a name="hsa">
@@ -215,7 +215,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hsa</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3837">hsa</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3844">hsa</a></pre>
 </li>
 </ul>
 </li>
@@ -232,7 +232,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>OnlineEntry</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3835">OnlineEntry</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3842">OnlineEntry</a>()</pre>
 </li>
 </ul>
 </li>
@@ -249,7 +249,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3840">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3847">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index 95e5eef..fda9ec4 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4144">HBaseFsck.PrintingErrorReporter</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4152">HBaseFsck.PrintingErrorReporter</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></pre>
 </li>
@@ -301,7 +301,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>errorCount</h4>
-<pre>public&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4145">errorCount</a></pre>
+<pre>public&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4153">errorCount</a></pre>
 </li>
 </ul>
 <a name="showProgress">
@@ -310,7 +310,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>showProgress</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4146">showProgress</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4154">showProgress</a></pre>
 </li>
 </ul>
 <a name="progressThreshold">
@@ -319,7 +319,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>progressThreshold</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4148">progressThreshold</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4156">progressThreshold</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.PrintingErrorReporter.progressThreshold">Constant Field Values</a></dd>
@@ -332,7 +332,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>errorTables</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4150">errorTables</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4158">errorTables</a></pre>
 </li>
 </ul>
 <a name="errorList">
@@ -341,7 +341,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockListLast">
 <li class="blockList">
 <h4>errorList</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4153">errorList</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4161">errorList</a></pre>
 </li>
 </ul>
 </li>
@@ -358,7 +358,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockListLast">
 <li class="blockList">
 <h4>PrintingErrorReporter</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4144">PrintingErrorReporter</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4152">PrintingErrorReporter</a>()</pre>
 </li>
 </ul>
 </li>
@@ -375,7 +375,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>clear</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4156">clear</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4164">clear</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#clear--">clear</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -388,7 +388,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4163">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4171">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -402,7 +402,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4178">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4186">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 <dl>
@@ -417,7 +417,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4184">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4192">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info)</pre>
@@ -433,7 +433,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4192">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4200">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info1,
@@ -450,7 +450,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4201">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4209">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#reportError-java.lang.String-">reportError</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -463,7 +463,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>report</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4211">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4219">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <div class="block">Report error information, but do not increment the error count.  Intended for cases
  where the actual error would have been reported previously.</div>
 <dl>
@@ -480,7 +480,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>summarize</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4219">summarize</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4227">summarize</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#summarize--">summarize</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -493,7 +493,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>getErrorList</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4232">getErrorList</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4240">getErrorList</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#getErrorList--">getErrorList</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -506,7 +506,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>print</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4237">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4245">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#print-java.lang.String-">print</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -519,7 +519,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>tableHasErrors</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4244">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4252">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#tableHasErrors-org.apache.hadoop.hbase.util.HBaseFsck.TableInfo-">tableHasErrors</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -532,7 +532,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>resetErrors</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4249">resetErrors</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4257">resetErrors</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#resetErrors--">resetErrors</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -545,7 +545,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>detail</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4254">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4262">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#detail-java.lang.String-">detail</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -558,7 +558,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockListLast">
 <li class="blockList">
 <h4>progress</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4262">progress</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4270">progress</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#progress--">progress</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index e5c90f3..575ec16 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.820">HBaseFsck.RegionBoundariesInformation</a>
+<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.827">HBaseFsck.RegionBoundariesInformation</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 </li>
 </ul>
@@ -219,7 +219,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>regionName</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.821">regionName</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.828">regionName</a></pre>
 </li>
 </ul>
 <a name="metaFirstKey">
@@ -228,7 +228,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>metaFirstKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.822">metaFirstKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.829">metaFirstKey</a></pre>
 </li>
 </ul>
 <a name="metaLastKey">
@@ -237,7 +237,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>metaLastKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.823">metaLastKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.830">metaLastKey</a></pre>
 </li>
 </ul>
 <a name="storesFirstKey">
@@ -246,7 +246,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>storesFirstKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.824">storesFirstKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.831">storesFirstKey</a></pre>
 </li>
 </ul>
 <a name="storesLastKey">
@@ -255,7 +255,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>storesLastKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.825">storesLastKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.832">storesLastKey</a></pre>
 </li>
 </ul>
 </li>
@@ -272,7 +272,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>RegionBoundariesInformation</h4>
-<pre>private&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.820">RegionBoundariesInformation</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.827">RegionBoundariesInformation</a>()</pre>
 </li>
 </ul>
 </li>
@@ -289,7 +289,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.827">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.834">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index f829e0a..2807a7b 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -126,7 +126,7 @@
 </dl>
 <hr>
 <br>
-<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.1262">HBaseFsck.RegionRepairException</a>
+<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.1269">HBaseFsck.RegionRepairException</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Exception thrown when a integrity repair operation fails in an
  unresolvable way.</div>
@@ -221,7 +221,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.h
 <ul class="blockList">
 <li class="blockList">
 <h4>serialVersionUID</h4>
-<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1263">serialVersionUID</a></pre>
+<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1270">serialVersionUID</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.RegionRepairException.serialVersionUID">Constant Field Values</a></dd>
@@ -234,7 +234,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.h
 <ul class="blockListLast">
 <li class="blockList">
 <h4>ioe</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1264">ioe</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1271">ioe</a></pre>
 </li>
 </ul>
 </li>
@@ -251,7 +251,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.h
 <ul class="blockListLast">
 <li class="blockList">
 <h4>RegionRepairException</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1265">RegionRepairException</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;s,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1272">RegionRepairException</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;s,
                              <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>&nbsp;ioe)</pre>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index 747fe1a..1e3b381 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2958">HBaseFsck.TableInfo.HDFSIntegrityFixer</a>
+<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2965">HBaseFsck.TableInfo.HDFSIntegrityFixer</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo.IntegrityFixSuggester</a></pre>
 <div class="block">This handler fixes integrity errors from hdfs information.  There are
  basically three classes of integrity problems 1) holes, 2) overlaps, and
@@ -295,7 +295,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>conf</h4>
-<pre>org.apache.hadoop.conf.Configuration <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2959">conf</a></pre>
+<pre>org.apache.hadoop.conf.Configuration <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2966">conf</a></pre>
 </li>
 </ul>
 <a name="fixOverlaps">
@@ -304,7 +304,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockListLast">
 <li class="blockList">
 <h4>fixOverlaps</h4>
-<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2961">fixOverlaps</a></pre>
+<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2968">fixOverlaps</a></pre>
 </li>
 </ul>
 </li>
@@ -321,7 +321,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HDFSIntegrityFixer</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2963">HDFSIntegrityFixer</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2970">HDFSIntegrityFixer</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
                    <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors,
                    org.apache.hadoop.conf.Configuration&nbsp;conf,
                    boolean&nbsp;fixHoles,
@@ -342,7 +342,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionStartKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2977">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;next)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2984">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;next)
                                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">This is a special case hole -- when the first region of a table is
  missing from META, HBase doesn't acknowledge the existance of the
@@ -367,7 +367,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionEndKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2997">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3004">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleRegionEndKeyNotEmpty-byte:A-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling case where a Table has a last region that does not
@@ -391,7 +391,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleHoleInRegionChain</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3019">handleHoleInRegionChain</a>(byte[]&nbsp;holeStartKey,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3026">handleHoleInRegionChain</a>(byte[]&nbsp;holeStartKey,
                                     byte[]&nbsp;holeStopKey)
                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">There is a hole in the hdfs regions that violates the table integrity
@@ -415,7 +415,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleOverlapGroup</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3049">handleOverlapGroup</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3056">handleOverlapGroup</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
                         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">This takes set of overlapping regions and merges them into a single
  region.  This covers cases like degenerate regions, shared start key,
@@ -444,7 +444,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>removeParentsAndFixSplits</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3074">removeParentsAndFixSplits</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3081">removeParentsAndFixSplits</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
                         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -458,7 +458,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>mergeOverlaps</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3186">mergeOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3193">mergeOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
             throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -472,7 +472,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockListLast">
 <li class="blockList">
 <h4>sidelineBigOverlaps</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3264">sidelineBigOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;bigOverlap)
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3271">sidelineBigOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;bigOverlap)
                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Sideline some regions in a big overlap group so that it
  will have fewer regions, and it is easier to merge them later on.</div>


[02/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.419"

<TRUNCATED>

[29/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/VersionInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/VersionInfo.html b/devapidocs/org/apache/hadoop/hbase/util/VersionInfo.html
index a42abbf..e858321 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/VersionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/VersionInfo.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9};
+var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -213,22 +213,29 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 </td>
 </tr>
 <tr id="i7" class="rowColor">
+<td class="colFirst"><code>(package private) static <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>[]</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/VersionInfo.html#getVersionComponents-java.lang.String-">getVersionComponents</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;version)</code>
+<div class="block">Returns the version components as Integer and String objects
+ Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"]</div>
+</td>
+</tr>
+<tr id="i8" class="altColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/VersionInfo.html#logVersion--">logVersion</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i8" class="altColor">
+<tr id="i9" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/VersionInfo.html#main-java.lang.String:A-">main</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</code>&nbsp;</td>
 </tr>
-<tr id="i9" class="rowColor">
+<tr id="i10" class="altColor">
 <td class="colFirst"><code>(package private) static <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/VersionInfo.html#versionReport--">versionReport</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i10" class="altColor">
+<tr id="i11" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/VersionInfo.html#writeTo-java.io.PrintStream-">writeTo</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/io/PrintStream.html?is-external=true" title="class or interface in java.io">PrintStream</a>&nbsp;out)</code>&nbsp;</td>
 </tr>
-<tr id="i11" class="rowColor">
+<tr id="i12" class="altColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/VersionInfo.html#writeTo-java.io.PrintWriter-">writeTo</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/io/PrintWriter.html?is-external=true" title="class or interface in java.io">PrintWriter</a>&nbsp;out)</code>&nbsp;</td>
 </tr>
@@ -269,7 +276,11 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>VERY_LARGE_NUMBER</h4>
-<pre>private static&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.38">VERY_LARGE_NUMBER</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.38">VERY_LARGE_NUMBER</a></pre>
+<dl>
+<dt><span class="seeLabel">See Also:</span></dt>
+<dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.VersionInfo.VERY_LARGE_NUMBER">Constant Field Values</a></dd>
+</dl>
 </li>
 </ul>
 </li>
@@ -427,13 +438,28 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
                                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;v2)</pre>
 </li>
 </ul>
+<a name="getVersionComponents-java.lang.String-">
+<!--   -->
+</a>
+<ul class="blockList">
+<li class="blockList">
+<h4>getVersionComponents</h4>
+<pre>static&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.153">getVersionComponents</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;version)</pre>
+<div class="block">Returns the version components as Integer and String objects
+ Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"]</div>
+<dl>
+<dt><span class="returnLabel">Returns:</span></dt>
+<dd>the components of the version string</dd>
+</dl>
+</li>
+</ul>
 <a name="main-java.lang.String:A-">
 <!--   -->
 </a>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>main</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.154">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</pre>
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.169">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index 90aa9ce..cc1c42f 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -532,14 +532,14 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PrettyPrinter.Unit.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PrettyPrinter.Unit</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/IdReadWriteLock.ReferenceType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">IdReadWriteLock.ReferenceType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PrettyPrinter.Unit.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PrettyPrinter.Unit</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Order.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Order</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PoolMap.PoolType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PoolMap.PoolType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">HBaseFsck.ErrorReporter.ERROR_CODE</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/ChecksumType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">ChecksumType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Bytes.LexicographicalComparerHolder.PureJavaComparer</span></a> (implements org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.Comparer.html" title="interface in org.apache.hadoop.hbase.util">Bytes.Comparer</a>&lt;T&gt;)</li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Bytes.LexicographicalComparerHolder.UnsafeComparer</span></a> (implements org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.Comparer.html" title="interface in org.apache.hadoop.hbase.util">Bytes.Comparer</a>&lt;T&gt;)</li>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">HBaseFsck.ErrorReporter.ERROR_CODE</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PoolMap.PoolType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PoolMap.PoolType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Order.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Order</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index c0e28b4..9ec5b35 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -189,8 +189,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">RegionGroupingProvider.Strategies</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/WALFactory.Providers.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">WALFactory.Providers</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">RegionGroupingProvider.Strategies</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index ebfbf3a..ce68f9b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 <span class="sourceLineNo">008</span>@InterfaceAudience.Private<a name="line.8"></a>
 <span class="sourceLineNo">009</span>public class Version {<a name="line.9"></a>
 <span class="sourceLineNo">010</span>  public static final String version = "3.0.0-SNAPSHOT";<a name="line.10"></a>
-<span class="sourceLineNo">011</span>  public static final String revision = "f4f2b68238a094d7b1931dc8b7939742ccbb2b57";<a name="line.11"></a>
+<span class="sourceLineNo">011</span>  public static final String revision = "556b22374423ff087c0583d02ae4298d4d4f2e6b";<a name="line.11"></a>
 <span class="sourceLineNo">012</span>  public static final String user = "jenkins";<a name="line.12"></a>
-<span class="sourceLineNo">013</span>  public static final String date = "Wed Apr 18 14:38:53 UTC 2018";<a name="line.13"></a>
+<span class="sourceLineNo">013</span>  public static final String date = "Thu Apr 19 14:39:00 UTC 2018";<a name="line.13"></a>
 <span class="sourceLineNo">014</span>  public static final String url = "git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";<a name="line.14"></a>
-<span class="sourceLineNo">015</span>  public static final String srcChecksum = "140c55771a388fd58f0c3c7100fa35b2";<a name="line.15"></a>
+<span class="sourceLineNo">015</span>  public static final String srcChecksum = "83ef0b63e39df660933d8e09ab06a005";<a name="line.15"></a>
 <span class="sourceLineNo">016</span>}<a name="line.16"></a>
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
index c938b47..9267f53 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.PlanComparator.html
@@ -30,205 +30,209 @@
 <span class="sourceLineNo">022</span>import java.util.Collections;<a name="line.22"></a>
 <span class="sourceLineNo">023</span>import java.util.Comparator;<a name="line.23"></a>
 <span class="sourceLineNo">024</span>import java.util.List;<a name="line.24"></a>
-<span class="sourceLineNo">025</span>import org.apache.hadoop.hbase.HBaseIOException;<a name="line.25"></a>
-<span class="sourceLineNo">026</span>import org.apache.hadoop.hbase.RegionMetrics;<a name="line.26"></a>
-<span class="sourceLineNo">027</span>import org.apache.hadoop.hbase.ServerName;<a name="line.27"></a>
-<span class="sourceLineNo">028</span>import org.apache.hadoop.hbase.Size;<a name="line.28"></a>
-<span class="sourceLineNo">029</span>import org.apache.hadoop.hbase.TableName;<a name="line.29"></a>
-<span class="sourceLineNo">030</span>import org.apache.hadoop.hbase.client.MasterSwitchType;<a name="line.30"></a>
-<span class="sourceLineNo">031</span>import org.apache.hadoop.hbase.client.RegionInfo;<a name="line.31"></a>
-<span class="sourceLineNo">032</span>import org.apache.hadoop.hbase.master.MasterRpcServices;<a name="line.32"></a>
-<span class="sourceLineNo">033</span>import org.apache.hadoop.hbase.master.MasterServices;<a name="line.33"></a>
-<span class="sourceLineNo">034</span>import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;<a name="line.34"></a>
-<span class="sourceLineNo">035</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.35"></a>
-<span class="sourceLineNo">036</span>import org.slf4j.Logger;<a name="line.36"></a>
-<span class="sourceLineNo">037</span>import org.slf4j.LoggerFactory;<a name="line.37"></a>
-<span class="sourceLineNo">038</span><a name="line.38"></a>
-<span class="sourceLineNo">039</span>import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;<a name="line.39"></a>
-<span class="sourceLineNo">040</span><a name="line.40"></a>
-<span class="sourceLineNo">041</span>/**<a name="line.41"></a>
-<span class="sourceLineNo">042</span> * Simple implementation of region normalizer.<a name="line.42"></a>
-<span class="sourceLineNo">043</span> *<a name="line.43"></a>
-<span class="sourceLineNo">044</span> * Logic in use:<a name="line.44"></a>
-<span class="sourceLineNo">045</span> *<a name="line.45"></a>
-<span class="sourceLineNo">046</span> *  &lt;ol&gt;<a name="line.46"></a>
-<span class="sourceLineNo">047</span> *  &lt;li&gt; Get all regions of a given table<a name="line.47"></a>
-<span class="sourceLineNo">048</span> *  &lt;li&gt; Get avg size S of each region (by total size of store files reported in RegionMetrics)<a name="line.48"></a>
-<span class="sourceLineNo">049</span> *  &lt;li&gt; Seek every single region one by one. If a region R0 is bigger than S * 2, it is<a name="line.49"></a>
-<span class="sourceLineNo">050</span> *  kindly requested to split. Thereon evaluate the next region R1<a name="line.50"></a>
-<span class="sourceLineNo">051</span> *  &lt;li&gt; Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.<a name="line.51"></a>
-<span class="sourceLineNo">052</span> *  Thereon evaluate the next region R2<a name="line.52"></a>
-<span class="sourceLineNo">053</span> *  &lt;li&gt; Otherwise, R1 is evaluated<a name="line.53"></a>
-<span class="sourceLineNo">054</span> * &lt;/ol&gt;<a name="line.54"></a>
-<span class="sourceLineNo">055</span> * &lt;p&gt;<a name="line.55"></a>
-<span class="sourceLineNo">056</span> * Region sizes are coarse and approximate on the order of megabytes. Additionally,<a name="line.56"></a>
-<span class="sourceLineNo">057</span> * "empty" regions (less than 1MB, with the previous note) are not merged away. This<a name="line.57"></a>
-<span class="sourceLineNo">058</span> * is by design to prevent normalization from undoing the pre-splitting of a table.<a name="line.58"></a>
-<span class="sourceLineNo">059</span> */<a name="line.59"></a>
-<span class="sourceLineNo">060</span>@InterfaceAudience.Private<a name="line.60"></a>
-<span class="sourceLineNo">061</span>public class SimpleRegionNormalizer implements RegionNormalizer {<a name="line.61"></a>
-<span class="sourceLineNo">062</span><a name="line.62"></a>
-<span class="sourceLineNo">063</span>  private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class);<a name="line.63"></a>
-<span class="sourceLineNo">064</span>  private static final int MIN_REGION_COUNT = 3;<a name="line.64"></a>
-<span class="sourceLineNo">065</span>  private MasterServices masterServices;<a name="line.65"></a>
-<span class="sourceLineNo">066</span>  private MasterRpcServices masterRpcServices;<a name="line.66"></a>
-<span class="sourceLineNo">067</span>  private static long[] skippedCount = new long[NormalizationPlan.PlanType.values().length];<a name="line.67"></a>
-<span class="sourceLineNo">068</span><a name="line.68"></a>
-<span class="sourceLineNo">069</span>  /**<a name="line.69"></a>
-<span class="sourceLineNo">070</span>   * Set the master service.<a name="line.70"></a>
-<span class="sourceLineNo">071</span>   * @param masterServices inject instance of MasterServices<a name="line.71"></a>
-<span class="sourceLineNo">072</span>   */<a name="line.72"></a>
-<span class="sourceLineNo">073</span>  @Override<a name="line.73"></a>
-<span class="sourceLineNo">074</span>  public void setMasterServices(MasterServices masterServices) {<a name="line.74"></a>
-<span class="sourceLineNo">075</span>    this.masterServices = masterServices;<a name="line.75"></a>
-<span class="sourceLineNo">076</span>  }<a name="line.76"></a>
-<span class="sourceLineNo">077</span><a name="line.77"></a>
-<span class="sourceLineNo">078</span>  @Override<a name="line.78"></a>
-<span class="sourceLineNo">079</span>  public void setMasterRpcServices(MasterRpcServices masterRpcServices) {<a name="line.79"></a>
-<span class="sourceLineNo">080</span>    this.masterRpcServices = masterRpcServices;<a name="line.80"></a>
-<span class="sourceLineNo">081</span>  }<a name="line.81"></a>
-<span class="sourceLineNo">082</span><a name="line.82"></a>
-<span class="sourceLineNo">083</span>  @Override<a name="line.83"></a>
-<span class="sourceLineNo">084</span>  public void planSkipped(RegionInfo hri, PlanType type) {<a name="line.84"></a>
-<span class="sourceLineNo">085</span>    skippedCount[type.ordinal()]++;<a name="line.85"></a>
-<span class="sourceLineNo">086</span>  }<a name="line.86"></a>
-<span class="sourceLineNo">087</span><a name="line.87"></a>
-<span class="sourceLineNo">088</span>  @Override<a name="line.88"></a>
-<span class="sourceLineNo">089</span>  public long getSkippedCount(NormalizationPlan.PlanType type) {<a name="line.89"></a>
-<span class="sourceLineNo">090</span>    return skippedCount[type.ordinal()];<a name="line.90"></a>
-<span class="sourceLineNo">091</span>  }<a name="line.91"></a>
-<span class="sourceLineNo">092</span><a name="line.92"></a>
-<span class="sourceLineNo">093</span>  /**<a name="line.93"></a>
-<span class="sourceLineNo">094</span>   * Comparator class that gives higher priority to region Split plan.<a name="line.94"></a>
-<span class="sourceLineNo">095</span>   */<a name="line.95"></a>
-<span class="sourceLineNo">096</span>  static class PlanComparator implements Comparator&lt;NormalizationPlan&gt; {<a name="line.96"></a>
-<span class="sourceLineNo">097</span>    @Override<a name="line.97"></a>
-<span class="sourceLineNo">098</span>    public int compare(NormalizationPlan plan1, NormalizationPlan plan2) {<a name="line.98"></a>
-<span class="sourceLineNo">099</span>      boolean plan1IsSplit = plan1 instanceof SplitNormalizationPlan;<a name="line.99"></a>
-<span class="sourceLineNo">100</span>      boolean plan2IsSplit = plan2 instanceof SplitNormalizationPlan;<a name="line.100"></a>
-<span class="sourceLineNo">101</span>      if (plan1IsSplit &amp;&amp; plan2IsSplit) {<a name="line.101"></a>
-<span class="sourceLineNo">102</span>        return 0;<a name="line.102"></a>
-<span class="sourceLineNo">103</span>      } else if (plan1IsSplit) {<a name="line.103"></a>
-<span class="sourceLineNo">104</span>        return -1;<a name="line.104"></a>
-<span class="sourceLineNo">105</span>      } else if (plan2IsSplit) {<a name="line.105"></a>
-<span class="sourceLineNo">106</span>        return 1;<a name="line.106"></a>
-<span class="sourceLineNo">107</span>      } else {<a name="line.107"></a>
-<span class="sourceLineNo">108</span>        return 0;<a name="line.108"></a>
-<span class="sourceLineNo">109</span>      }<a name="line.109"></a>
-<span class="sourceLineNo">110</span>    }<a name="line.110"></a>
-<span class="sourceLineNo">111</span>  }<a name="line.111"></a>
-<span class="sourceLineNo">112</span><a name="line.112"></a>
-<span class="sourceLineNo">113</span>  private Comparator&lt;NormalizationPlan&gt; planComparator = new PlanComparator();<a name="line.113"></a>
-<span class="sourceLineNo">114</span><a name="line.114"></a>
-<span class="sourceLineNo">115</span>  /**<a name="line.115"></a>
-<span class="sourceLineNo">116</span>   * Computes next most "urgent" normalization action on the table.<a name="line.116"></a>
-<span class="sourceLineNo">117</span>   * Action may be either a split, or a merge, or no action.<a name="line.117"></a>
-<span class="sourceLineNo">118</span>   *<a name="line.118"></a>
-<span class="sourceLineNo">119</span>   * @param table table to normalize<a name="line.119"></a>
-<span class="sourceLineNo">120</span>   * @return normalization plan to execute<a name="line.120"></a>
-<span class="sourceLineNo">121</span>   */<a name="line.121"></a>
-<span class="sourceLineNo">122</span>  @Override<a name="line.122"></a>
-<span class="sourceLineNo">123</span>  public List&lt;NormalizationPlan&gt; computePlanForTable(TableName table) throws HBaseIOException {<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    if (table == null || table.isSystemTable()) {<a name="line.124"></a>
-<span class="sourceLineNo">125</span>      LOG.debug("Normalization of system table " + table + " isn't allowed");<a name="line.125"></a>
-<span class="sourceLineNo">126</span>      return null;<a name="line.126"></a>
-<span class="sourceLineNo">127</span>    }<a name="line.127"></a>
-<span class="sourceLineNo">128</span><a name="line.128"></a>
-<span class="sourceLineNo">129</span>    List&lt;NormalizationPlan&gt; plans = new ArrayList&lt;&gt;();<a name="line.129"></a>
-<span class="sourceLineNo">130</span>    List&lt;RegionInfo&gt; tableRegions = masterServices.getAssignmentManager().getRegionStates().<a name="line.130"></a>
-<span class="sourceLineNo">131</span>      getRegionsOfTable(table);<a name="line.131"></a>
+<span class="sourceLineNo">025</span>import org.apache.hadoop.hbase.HBaseConfiguration;<a name="line.25"></a>
+<span class="sourceLineNo">026</span>import org.apache.hadoop.hbase.HBaseIOException;<a name="line.26"></a>
+<span class="sourceLineNo">027</span>import org.apache.hadoop.hbase.RegionMetrics;<a name="line.27"></a>
+<span class="sourceLineNo">028</span>import org.apache.hadoop.hbase.ServerName;<a name="line.28"></a>
+<span class="sourceLineNo">029</span>import org.apache.hadoop.hbase.Size;<a name="line.29"></a>
+<span class="sourceLineNo">030</span>import org.apache.hadoop.hbase.TableName;<a name="line.30"></a>
+<span class="sourceLineNo">031</span>import org.apache.hadoop.hbase.client.MasterSwitchType;<a name="line.31"></a>
+<span class="sourceLineNo">032</span>import org.apache.hadoop.hbase.client.RegionInfo;<a name="line.32"></a>
+<span class="sourceLineNo">033</span>import org.apache.hadoop.hbase.master.MasterRpcServices;<a name="line.33"></a>
+<span class="sourceLineNo">034</span>import org.apache.hadoop.hbase.master.MasterServices;<a name="line.34"></a>
+<span class="sourceLineNo">035</span>import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;<a name="line.35"></a>
+<span class="sourceLineNo">036</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.36"></a>
+<span class="sourceLineNo">037</span>import org.slf4j.Logger;<a name="line.37"></a>
+<span class="sourceLineNo">038</span>import org.slf4j.LoggerFactory;<a name="line.38"></a>
+<span class="sourceLineNo">039</span><a name="line.39"></a>
+<span class="sourceLineNo">040</span>import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;<a name="line.40"></a>
+<span class="sourceLineNo">041</span><a name="line.41"></a>
+<span class="sourceLineNo">042</span>/**<a name="line.42"></a>
+<span class="sourceLineNo">043</span> * Simple implementation of region normalizer.<a name="line.43"></a>
+<span class="sourceLineNo">044</span> *<a name="line.44"></a>
+<span class="sourceLineNo">045</span> * Logic in use:<a name="line.45"></a>
+<span class="sourceLineNo">046</span> *<a name="line.46"></a>
+<span class="sourceLineNo">047</span> *  &lt;ol&gt;<a name="line.47"></a>
+<span class="sourceLineNo">048</span> *  &lt;li&gt; Get all regions of a given table<a name="line.48"></a>
+<span class="sourceLineNo">049</span> *  &lt;li&gt; Get avg size S of each region (by total size of store files reported in RegionMetrics)<a name="line.49"></a>
+<span class="sourceLineNo">050</span> *  &lt;li&gt; Seek every single region one by one. If a region R0 is bigger than S * 2, it is<a name="line.50"></a>
+<span class="sourceLineNo">051</span> *  kindly requested to split. Thereon evaluate the next region R1<a name="line.51"></a>
+<span class="sourceLineNo">052</span> *  &lt;li&gt; Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.<a name="line.52"></a>
+<span class="sourceLineNo">053</span> *  Thereon evaluate the next region R2<a name="line.53"></a>
+<span class="sourceLineNo">054</span> *  &lt;li&gt; Otherwise, R1 is evaluated<a name="line.54"></a>
+<span class="sourceLineNo">055</span> * &lt;/ol&gt;<a name="line.55"></a>
+<span class="sourceLineNo">056</span> * &lt;p&gt;<a name="line.56"></a>
+<span class="sourceLineNo">057</span> * Region sizes are coarse and approximate on the order of megabytes. Additionally,<a name="line.57"></a>
+<span class="sourceLineNo">058</span> * "empty" regions (less than 1MB, with the previous note) are not merged away. This<a name="line.58"></a>
+<span class="sourceLineNo">059</span> * is by design to prevent normalization from undoing the pre-splitting of a table.<a name="line.59"></a>
+<span class="sourceLineNo">060</span> */<a name="line.60"></a>
+<span class="sourceLineNo">061</span>@InterfaceAudience.Private<a name="line.61"></a>
+<span class="sourceLineNo">062</span>public class SimpleRegionNormalizer implements RegionNormalizer {<a name="line.62"></a>
+<span class="sourceLineNo">063</span><a name="line.63"></a>
+<span class="sourceLineNo">064</span>  private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class);<a name="line.64"></a>
+<span class="sourceLineNo">065</span>  private int minRegionCount;<a name="line.65"></a>
+<span class="sourceLineNo">066</span>  private MasterServices masterServices;<a name="line.66"></a>
+<span class="sourceLineNo">067</span>  private MasterRpcServices masterRpcServices;<a name="line.67"></a>
+<span class="sourceLineNo">068</span>  private static long[] skippedCount = new long[NormalizationPlan.PlanType.values().length];<a name="line.68"></a>
+<span class="sourceLineNo">069</span><a name="line.69"></a>
+<span class="sourceLineNo">070</span>  public SimpleRegionNormalizer() {<a name="line.70"></a>
+<span class="sourceLineNo">071</span>    minRegionCount = HBaseConfiguration.create().getInt("hbase.normalizer.min.region.count", 3);<a name="line.71"></a>
+<span class="sourceLineNo">072</span>  }<a name="line.72"></a>
+<span class="sourceLineNo">073</span>  /**<a name="line.73"></a>
+<span class="sourceLineNo">074</span>   * Set the master service.<a name="line.74"></a>
+<span class="sourceLineNo">075</span>   * @param masterServices inject instance of MasterServices<a name="line.75"></a>
+<span class="sourceLineNo">076</span>   */<a name="line.76"></a>
+<span class="sourceLineNo">077</span>  @Override<a name="line.77"></a>
+<span class="sourceLineNo">078</span>  public void setMasterServices(MasterServices masterServices) {<a name="line.78"></a>
+<span class="sourceLineNo">079</span>    this.masterServices = masterServices;<a name="line.79"></a>
+<span class="sourceLineNo">080</span>  }<a name="line.80"></a>
+<span class="sourceLineNo">081</span><a name="line.81"></a>
+<span class="sourceLineNo">082</span>  @Override<a name="line.82"></a>
+<span class="sourceLineNo">083</span>  public void setMasterRpcServices(MasterRpcServices masterRpcServices) {<a name="line.83"></a>
+<span class="sourceLineNo">084</span>    this.masterRpcServices = masterRpcServices;<a name="line.84"></a>
+<span class="sourceLineNo">085</span>  }<a name="line.85"></a>
+<span class="sourceLineNo">086</span><a name="line.86"></a>
+<span class="sourceLineNo">087</span>  @Override<a name="line.87"></a>
+<span class="sourceLineNo">088</span>  public void planSkipped(RegionInfo hri, PlanType type) {<a name="line.88"></a>
+<span class="sourceLineNo">089</span>    skippedCount[type.ordinal()]++;<a name="line.89"></a>
+<span class="sourceLineNo">090</span>  }<a name="line.90"></a>
+<span class="sourceLineNo">091</span><a name="line.91"></a>
+<span class="sourceLineNo">092</span>  @Override<a name="line.92"></a>
+<span class="sourceLineNo">093</span>  public long getSkippedCount(NormalizationPlan.PlanType type) {<a name="line.93"></a>
+<span class="sourceLineNo">094</span>    return skippedCount[type.ordinal()];<a name="line.94"></a>
+<span class="sourceLineNo">095</span>  }<a name="line.95"></a>
+<span class="sourceLineNo">096</span><a name="line.96"></a>
+<span class="sourceLineNo">097</span>  /**<a name="line.97"></a>
+<span class="sourceLineNo">098</span>   * Comparator class that gives higher priority to region Split plan.<a name="line.98"></a>
+<span class="sourceLineNo">099</span>   */<a name="line.99"></a>
+<span class="sourceLineNo">100</span>  static class PlanComparator implements Comparator&lt;NormalizationPlan&gt; {<a name="line.100"></a>
+<span class="sourceLineNo">101</span>    @Override<a name="line.101"></a>
+<span class="sourceLineNo">102</span>    public int compare(NormalizationPlan plan1, NormalizationPlan plan2) {<a name="line.102"></a>
+<span class="sourceLineNo">103</span>      boolean plan1IsSplit = plan1 instanceof SplitNormalizationPlan;<a name="line.103"></a>
+<span class="sourceLineNo">104</span>      boolean plan2IsSplit = plan2 instanceof SplitNormalizationPlan;<a name="line.104"></a>
+<span class="sourceLineNo">105</span>      if (plan1IsSplit &amp;&amp; plan2IsSplit) {<a name="line.105"></a>
+<span class="sourceLineNo">106</span>        return 0;<a name="line.106"></a>
+<span class="sourceLineNo">107</span>      } else if (plan1IsSplit) {<a name="line.107"></a>
+<span class="sourceLineNo">108</span>        return -1;<a name="line.108"></a>
+<span class="sourceLineNo">109</span>      } else if (plan2IsSplit) {<a name="line.109"></a>
+<span class="sourceLineNo">110</span>        return 1;<a name="line.110"></a>
+<span class="sourceLineNo">111</span>      } else {<a name="line.111"></a>
+<span class="sourceLineNo">112</span>        return 0;<a name="line.112"></a>
+<span class="sourceLineNo">113</span>      }<a name="line.113"></a>
+<span class="sourceLineNo">114</span>    }<a name="line.114"></a>
+<span class="sourceLineNo">115</span>  }<a name="line.115"></a>
+<span class="sourceLineNo">116</span><a name="line.116"></a>
+<span class="sourceLineNo">117</span>  private Comparator&lt;NormalizationPlan&gt; planComparator = new PlanComparator();<a name="line.117"></a>
+<span class="sourceLineNo">118</span><a name="line.118"></a>
+<span class="sourceLineNo">119</span>  /**<a name="line.119"></a>
+<span class="sourceLineNo">120</span>   * Computes next most "urgent" normalization action on the table.<a name="line.120"></a>
+<span class="sourceLineNo">121</span>   * Action may be either a split, or a merge, or no action.<a name="line.121"></a>
+<span class="sourceLineNo">122</span>   *<a name="line.122"></a>
+<span class="sourceLineNo">123</span>   * @param table table to normalize<a name="line.123"></a>
+<span class="sourceLineNo">124</span>   * @return normalization plan to execute<a name="line.124"></a>
+<span class="sourceLineNo">125</span>   */<a name="line.125"></a>
+<span class="sourceLineNo">126</span>  @Override<a name="line.126"></a>
+<span class="sourceLineNo">127</span>  public List&lt;NormalizationPlan&gt; computePlanForTable(TableName table) throws HBaseIOException {<a name="line.127"></a>
+<span class="sourceLineNo">128</span>    if (table == null || table.isSystemTable()) {<a name="line.128"></a>
+<span class="sourceLineNo">129</span>      LOG.debug("Normalization of system table " + table + " isn't allowed");<a name="line.129"></a>
+<span class="sourceLineNo">130</span>      return null;<a name="line.130"></a>
+<span class="sourceLineNo">131</span>    }<a name="line.131"></a>
 <span class="sourceLineNo">132</span><a name="line.132"></a>
-<span class="sourceLineNo">133</span>    //TODO: should we make min number of regions a config param?<a name="line.133"></a>
-<span class="sourceLineNo">134</span>    if (tableRegions == null || tableRegions.size() &lt; MIN_REGION_COUNT) {<a name="line.134"></a>
-<span class="sourceLineNo">135</span>      int nrRegions = tableRegions == null ? 0 : tableRegions.size();<a name="line.135"></a>
-<span class="sourceLineNo">136</span>      LOG.debug("Table " + table + " has " + nrRegions + " regions, required min number"<a name="line.136"></a>
-<span class="sourceLineNo">137</span>        + " of regions for normalizer to run is " + MIN_REGION_COUNT + ", not running normalizer");<a name="line.137"></a>
-<span class="sourceLineNo">138</span>      return null;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>    }<a name="line.139"></a>
-<span class="sourceLineNo">140</span><a name="line.140"></a>
-<span class="sourceLineNo">141</span>    LOG.debug("Computing normalization plan for table: " + table +<a name="line.141"></a>
-<span class="sourceLineNo">142</span>      ", number of regions: " + tableRegions.size());<a name="line.142"></a>
-<span class="sourceLineNo">143</span><a name="line.143"></a>
-<span class="sourceLineNo">144</span>    long totalSizeMb = 0;<a name="line.144"></a>
-<span class="sourceLineNo">145</span>    int acutalRegionCnt = 0;<a name="line.145"></a>
-<span class="sourceLineNo">146</span><a name="line.146"></a>
-<span class="sourceLineNo">147</span>    for (int i = 0; i &lt; tableRegions.size(); i++) {<a name="line.147"></a>
-<span class="sourceLineNo">148</span>      RegionInfo hri = tableRegions.get(i);<a name="line.148"></a>
-<span class="sourceLineNo">149</span>      long regionSize = getRegionSize(hri);<a name="line.149"></a>
-<span class="sourceLineNo">150</span>      if (regionSize &gt; 0) {<a name="line.150"></a>
-<span class="sourceLineNo">151</span>        acutalRegionCnt++;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>        totalSizeMb += regionSize;<a name="line.152"></a>
-<span class="sourceLineNo">153</span>      }<a name="line.153"></a>
-<span class="sourceLineNo">154</span>    }<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>    double avgRegionSize = acutalRegionCnt == 0 ? 0 : totalSizeMb / (double) acutalRegionCnt;<a name="line.156"></a>
-<span class="sourceLineNo">157</span><a name="line.157"></a>
-<span class="sourceLineNo">158</span>    LOG.debug("Table " + table + ", total aggregated regions size: " + totalSizeMb);<a name="line.158"></a>
-<span class="sourceLineNo">159</span>    LOG.debug("Table " + table + ", average region size: " + avgRegionSize);<a name="line.159"></a>
-<span class="sourceLineNo">160</span><a name="line.160"></a>
-<span class="sourceLineNo">161</span>    int candidateIdx = 0;<a name="line.161"></a>
-<span class="sourceLineNo">162</span>    boolean splitEnabled = true, mergeEnabled = true;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>    try {<a name="line.163"></a>
-<span class="sourceLineNo">164</span>      splitEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.164"></a>
-<span class="sourceLineNo">165</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled();<a name="line.165"></a>
-<span class="sourceLineNo">166</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.166"></a>
-<span class="sourceLineNo">167</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.167"></a>
-<span class="sourceLineNo">168</span>    }<a name="line.168"></a>
-<span class="sourceLineNo">169</span>    try {<a name="line.169"></a>
-<span class="sourceLineNo">170</span>      mergeEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.170"></a>
-<span class="sourceLineNo">171</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled();<a name="line.171"></a>
-<span class="sourceLineNo">172</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.172"></a>
-<span class="sourceLineNo">173</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.173"></a>
-<span class="sourceLineNo">174</span>    }<a name="line.174"></a>
-<span class="sourceLineNo">175</span>    while (candidateIdx &lt; tableRegions.size()) {<a name="line.175"></a>
-<span class="sourceLineNo">176</span>      RegionInfo hri = tableRegions.get(candidateIdx);<a name="line.176"></a>
-<span class="sourceLineNo">177</span>      long regionSize = getRegionSize(hri);<a name="line.177"></a>
-<span class="sourceLineNo">178</span>      // if the region is &gt; 2 times larger than average, we split it, split<a name="line.178"></a>
-<span class="sourceLineNo">179</span>      // is more high priority normalization action than merge.<a name="line.179"></a>
-<span class="sourceLineNo">180</span>      if (regionSize &gt; 2 * avgRegionSize) {<a name="line.180"></a>
-<span class="sourceLineNo">181</span>        if (splitEnabled) {<a name="line.181"></a>
-<span class="sourceLineNo">182</span>          LOG.info("Table " + table + ", large region " + hri.getRegionNameAsString() + " has size "<a name="line.182"></a>
-<span class="sourceLineNo">183</span>              + regionSize + ", more than twice avg size, splitting");<a name="line.183"></a>
-<span class="sourceLineNo">184</span>          plans.add(new SplitNormalizationPlan(hri, null));<a name="line.184"></a>
-<span class="sourceLineNo">185</span>        }<a name="line.185"></a>
-<span class="sourceLineNo">186</span>      } else {<a name="line.186"></a>
-<span class="sourceLineNo">187</span>        if (candidateIdx == tableRegions.size()-1) {<a name="line.187"></a>
-<span class="sourceLineNo">188</span>          break;<a name="line.188"></a>
+<span class="sourceLineNo">133</span>    List&lt;NormalizationPlan&gt; plans = new ArrayList&lt;&gt;();<a name="line.133"></a>
+<span class="sourceLineNo">134</span>    List&lt;RegionInfo&gt; tableRegions = masterServices.getAssignmentManager().getRegionStates().<a name="line.134"></a>
+<span class="sourceLineNo">135</span>      getRegionsOfTable(table);<a name="line.135"></a>
+<span class="sourceLineNo">136</span><a name="line.136"></a>
+<span class="sourceLineNo">137</span>    //TODO: should we make min number of regions a config param?<a name="line.137"></a>
+<span class="sourceLineNo">138</span>    if (tableRegions == null || tableRegions.size() &lt; minRegionCount) {<a name="line.138"></a>
+<span class="sourceLineNo">139</span>      int nrRegions = tableRegions == null ? 0 : tableRegions.size();<a name="line.139"></a>
+<span class="sourceLineNo">140</span>      LOG.debug("Table " + table + " has " + nrRegions + " regions, required min number"<a name="line.140"></a>
+<span class="sourceLineNo">141</span>        + " of regions for normalizer to run is " + minRegionCount + ", not running normalizer");<a name="line.141"></a>
+<span class="sourceLineNo">142</span>      return null;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    }<a name="line.143"></a>
+<span class="sourceLineNo">144</span><a name="line.144"></a>
+<span class="sourceLineNo">145</span>    LOG.debug("Computing normalization plan for table: " + table +<a name="line.145"></a>
+<span class="sourceLineNo">146</span>      ", number of regions: " + tableRegions.size());<a name="line.146"></a>
+<span class="sourceLineNo">147</span><a name="line.147"></a>
+<span class="sourceLineNo">148</span>    long totalSizeMb = 0;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>    int acutalRegionCnt = 0;<a name="line.149"></a>
+<span class="sourceLineNo">150</span><a name="line.150"></a>
+<span class="sourceLineNo">151</span>    for (int i = 0; i &lt; tableRegions.size(); i++) {<a name="line.151"></a>
+<span class="sourceLineNo">152</span>      RegionInfo hri = tableRegions.get(i);<a name="line.152"></a>
+<span class="sourceLineNo">153</span>      long regionSize = getRegionSize(hri);<a name="line.153"></a>
+<span class="sourceLineNo">154</span>      if (regionSize &gt; 0) {<a name="line.154"></a>
+<span class="sourceLineNo">155</span>        acutalRegionCnt++;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>        totalSizeMb += regionSize;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>      }<a name="line.157"></a>
+<span class="sourceLineNo">158</span>    }<a name="line.158"></a>
+<span class="sourceLineNo">159</span><a name="line.159"></a>
+<span class="sourceLineNo">160</span>    double avgRegionSize = acutalRegionCnt == 0 ? 0 : totalSizeMb / (double) acutalRegionCnt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span><a name="line.161"></a>
+<span class="sourceLineNo">162</span>    LOG.debug("Table " + table + ", total aggregated regions size: " + totalSizeMb);<a name="line.162"></a>
+<span class="sourceLineNo">163</span>    LOG.debug("Table " + table + ", average region size: " + avgRegionSize);<a name="line.163"></a>
+<span class="sourceLineNo">164</span><a name="line.164"></a>
+<span class="sourceLineNo">165</span>    int candidateIdx = 0;<a name="line.165"></a>
+<span class="sourceLineNo">166</span>    boolean splitEnabled = true, mergeEnabled = true;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>    try {<a name="line.167"></a>
+<span class="sourceLineNo">168</span>      splitEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.168"></a>
+<span class="sourceLineNo">169</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled();<a name="line.169"></a>
+<span class="sourceLineNo">170</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.170"></a>
+<span class="sourceLineNo">171</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.171"></a>
+<span class="sourceLineNo">172</span>    }<a name="line.172"></a>
+<span class="sourceLineNo">173</span>    try {<a name="line.173"></a>
+<span class="sourceLineNo">174</span>      mergeEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.174"></a>
+<span class="sourceLineNo">175</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled();<a name="line.175"></a>
+<span class="sourceLineNo">176</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.176"></a>
+<span class="sourceLineNo">177</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.177"></a>
+<span class="sourceLineNo">178</span>    }<a name="line.178"></a>
+<span class="sourceLineNo">179</span>    while (candidateIdx &lt; tableRegions.size()) {<a name="line.179"></a>
+<span class="sourceLineNo">180</span>      RegionInfo hri = tableRegions.get(candidateIdx);<a name="line.180"></a>
+<span class="sourceLineNo">181</span>      long regionSize = getRegionSize(hri);<a name="line.181"></a>
+<span class="sourceLineNo">182</span>      // if the region is &gt; 2 times larger than average, we split it, split<a name="line.182"></a>
+<span class="sourceLineNo">183</span>      // is more high priority normalization action than merge.<a name="line.183"></a>
+<span class="sourceLineNo">184</span>      if (regionSize &gt; 2 * avgRegionSize) {<a name="line.184"></a>
+<span class="sourceLineNo">185</span>        if (splitEnabled) {<a name="line.185"></a>
+<span class="sourceLineNo">186</span>          LOG.info("Table " + table + ", large region " + hri.getRegionNameAsString() + " has size "<a name="line.186"></a>
+<span class="sourceLineNo">187</span>              + regionSize + ", more than twice avg size, splitting");<a name="line.187"></a>
+<span class="sourceLineNo">188</span>          plans.add(new SplitNormalizationPlan(hri, null));<a name="line.188"></a>
 <span class="sourceLineNo">189</span>        }<a name="line.189"></a>
-<span class="sourceLineNo">190</span>        if (mergeEnabled) {<a name="line.190"></a>
-<span class="sourceLineNo">191</span>          RegionInfo hri2 = tableRegions.get(candidateIdx+1);<a name="line.191"></a>
-<span class="sourceLineNo">192</span>          long regionSize2 = getRegionSize(hri2);<a name="line.192"></a>
-<span class="sourceLineNo">193</span>          if (regionSize &gt;= 0 &amp;&amp; regionSize2 &gt;= 0 &amp;&amp; regionSize + regionSize2 &lt; avgRegionSize) {<a name="line.193"></a>
-<span class="sourceLineNo">194</span>            LOG.info("Table " + table + ", small region size: " + regionSize<a name="line.194"></a>
-<span class="sourceLineNo">195</span>              + " plus its neighbor size: " + regionSize2<a name="line.195"></a>
-<span class="sourceLineNo">196</span>              + ", less than the avg size " + avgRegionSize + ", merging them");<a name="line.196"></a>
-<span class="sourceLineNo">197</span>            plans.add(new MergeNormalizationPlan(hri, hri2));<a name="line.197"></a>
-<span class="sourceLineNo">198</span>            candidateIdx++;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>          }<a name="line.199"></a>
-<span class="sourceLineNo">200</span>        }<a name="line.200"></a>
-<span class="sourceLineNo">201</span>      }<a name="line.201"></a>
-<span class="sourceLineNo">202</span>      candidateIdx++;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>    }<a name="line.203"></a>
-<span class="sourceLineNo">204</span>    if (plans.isEmpty()) {<a name="line.204"></a>
-<span class="sourceLineNo">205</span>      LOG.debug("No normalization needed, regions look good for table: " + table);<a name="line.205"></a>
-<span class="sourceLineNo">206</span>      return null;<a name="line.206"></a>
+<span class="sourceLineNo">190</span>      } else {<a name="line.190"></a>
+<span class="sourceLineNo">191</span>        if (candidateIdx == tableRegions.size()-1) {<a name="line.191"></a>
+<span class="sourceLineNo">192</span>          break;<a name="line.192"></a>
+<span class="sourceLineNo">193</span>        }<a name="line.193"></a>
+<span class="sourceLineNo">194</span>        if (mergeEnabled) {<a name="line.194"></a>
+<span class="sourceLineNo">195</span>          RegionInfo hri2 = tableRegions.get(candidateIdx+1);<a name="line.195"></a>
+<span class="sourceLineNo">196</span>          long regionSize2 = getRegionSize(hri2);<a name="line.196"></a>
+<span class="sourceLineNo">197</span>          if (regionSize &gt;= 0 &amp;&amp; regionSize2 &gt;= 0 &amp;&amp; regionSize + regionSize2 &lt; avgRegionSize) {<a name="line.197"></a>
+<span class="sourceLineNo">198</span>            LOG.info("Table " + table + ", small region size: " + regionSize<a name="line.198"></a>
+<span class="sourceLineNo">199</span>              + " plus its neighbor size: " + regionSize2<a name="line.199"></a>
+<span class="sourceLineNo">200</span>              + ", less than the avg size " + avgRegionSize + ", merging them");<a name="line.200"></a>
+<span class="sourceLineNo">201</span>            plans.add(new MergeNormalizationPlan(hri, hri2));<a name="line.201"></a>
+<span class="sourceLineNo">202</span>            candidateIdx++;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>          }<a name="line.203"></a>
+<span class="sourceLineNo">204</span>        }<a name="line.204"></a>
+<span class="sourceLineNo">205</span>      }<a name="line.205"></a>
+<span class="sourceLineNo">206</span>      candidateIdx++;<a name="line.206"></a>
 <span class="sourceLineNo">207</span>    }<a name="line.207"></a>
-<span class="sourceLineNo">208</span>    Collections.sort(plans, planComparator);<a name="line.208"></a>
-<span class="sourceLineNo">209</span>    return plans;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  }<a name="line.210"></a>
-<span class="sourceLineNo">211</span><a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private long getRegionSize(RegionInfo hri) {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>    ServerName sn = masterServices.getAssignmentManager().getRegionStates().<a name="line.213"></a>
-<span class="sourceLineNo">214</span>      getRegionServerOfRegion(hri);<a name="line.214"></a>
-<span class="sourceLineNo">215</span>    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).<a name="line.215"></a>
-<span class="sourceLineNo">216</span>      getRegionMetrics().get(hri.getRegionName());<a name="line.216"></a>
-<span class="sourceLineNo">217</span>    if (regionLoad == null) {<a name="line.217"></a>
-<span class="sourceLineNo">218</span>      LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");<a name="line.218"></a>
-<span class="sourceLineNo">219</span>      return -1;<a name="line.219"></a>
-<span class="sourceLineNo">220</span>    }<a name="line.220"></a>
-<span class="sourceLineNo">221</span>    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  }<a name="line.222"></a>
-<span class="sourceLineNo">223</span>}<a name="line.223"></a>
+<span class="sourceLineNo">208</span>    if (plans.isEmpty()) {<a name="line.208"></a>
+<span class="sourceLineNo">209</span>      LOG.debug("No normalization needed, regions look good for table: " + table);<a name="line.209"></a>
+<span class="sourceLineNo">210</span>      return null;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>    }<a name="line.211"></a>
+<span class="sourceLineNo">212</span>    Collections.sort(plans, planComparator);<a name="line.212"></a>
+<span class="sourceLineNo">213</span>    return plans;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  }<a name="line.214"></a>
+<span class="sourceLineNo">215</span><a name="line.215"></a>
+<span class="sourceLineNo">216</span>  private long getRegionSize(RegionInfo hri) {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>    ServerName sn = masterServices.getAssignmentManager().getRegionStates().<a name="line.217"></a>
+<span class="sourceLineNo">218</span>      getRegionServerOfRegion(hri);<a name="line.218"></a>
+<span class="sourceLineNo">219</span>    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>      getRegionMetrics().get(hri.getRegionName());<a name="line.220"></a>
+<span class="sourceLineNo">221</span>    if (regionLoad == null) {<a name="line.221"></a>
+<span class="sourceLineNo">222</span>      LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");<a name="line.222"></a>
+<span class="sourceLineNo">223</span>      return -1;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>    }<a name="line.224"></a>
+<span class="sourceLineNo">225</span>    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);<a name="line.225"></a>
+<span class="sourceLineNo">226</span>  }<a name="line.226"></a>
+<span class="sourceLineNo">227</span>}<a name="line.227"></a>
 
 
 


[35/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
index 44910ab..6acb62e 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":6,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":6,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -292,39 +292,31 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 </td>
 </tr>
 <tr id="i13" class="rowColor">
-<td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a></span>(long&nbsp;readPoint,
-          long&nbsp;order)</code>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-</td>
-</tr>
-<tr id="i14" class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a></span>(long&nbsp;readPoint,
-           long&nbsp;order)</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPoint)</code>&nbsp;</td>
 </tr>
-<tr id="i15" class="rowColor">
+<tr id="i14" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.html" title="class in org.apache.hadoop.hbase.regionserver">TimeRangeTracker</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i16" class="altColor">
+<tr id="i15" class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstKeyOnRow)</code>&nbsp;</td>
 </tr>
-<tr id="i17" class="rowColor">
+<tr id="i16" class="altColor">
 <td class="colFirst"><code>long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i18" class="altColor">
+<tr id="i17" class="rowColor">
 <td class="colFirst"><code>protected long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
               boolean&nbsp;succ)</code>&nbsp;</td>
 </tr>
-<tr id="i19" class="rowColor">
+<tr id="i18" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i20" class="altColor">
+<tr id="i19" class="rowColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a></span>(long&nbsp;delta,
        long&nbsp;heapOverhead,
@@ -332,45 +324,45 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <div class="block">Updates the size counters of the segment by the given delta</div>
 </td>
 </tr>
-<tr id="i21" class="rowColor">
+<tr id="i20" class="altColor">
 <td class="colFirst"><code>protected long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a></span>(boolean&nbsp;offHeap)</code>&nbsp;</td>
 </tr>
-<tr id="i22" class="altColor">
+<tr id="i21" class="rowColor">
 <td class="colFirst"><code>protected long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a></span>(boolean&nbsp;onHeap)</code>&nbsp;</td>
 </tr>
-<tr id="i23" class="rowColor">
+<tr id="i22" class="altColor">
 <td class="colFirst"><code>protected abstract long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntrySize--">indexEntrySize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i24" class="altColor">
+<tr id="i23" class="rowColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
            boolean&nbsp;mslabUsed,
            <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</code>&nbsp;</td>
 </tr>
-<tr id="i25" class="rowColor">
+<tr id="i24" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i26" class="altColor">
+<tr id="i25" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i27" class="rowColor">
+<tr id="i26" class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i28" class="altColor">
+<tr id="i27" class="rowColor">
 <td class="colFirst"><code>long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i29" class="rowColor">
+<tr id="i28" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i30" class="altColor">
+<tr id="i29" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                        boolean&nbsp;forceCloneOfBigCell)</code>
@@ -380,16 +372,16 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.</div>
 </td>
 </tr>
-<tr id="i31" class="rowColor">
+<tr id="i30" class="altColor">
 <td class="colFirst"><code>long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i32" class="altColor">
+<tr id="i31" class="rowColor">
 <td class="colFirst"><code>protected long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                  boolean&nbsp;succ)</code>&nbsp;</td>
 </tr>
-<tr id="i33" class="rowColor">
+<tr id="i32" class="altColor">
 <td class="colFirst"><code>protected <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetOld,
           <a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetNew)</code>
@@ -397,29 +389,29 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  immutable CellSet after its creation in immutable segment constructor</div>
 </td>
 </tr>
-<tr id="i34" class="altColor">
+<tr id="i33" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/io/TimeRange.html" title="class in org.apache.hadoop.hbase.io">TimeRange</a>&nbsp;tr,
           long&nbsp;oldestUnexpiredTS)</code>&nbsp;</td>
 </tr>
-<tr id="i35" class="rowColor">
+<tr id="i34" class="altColor">
 <td class="colFirst"><code>protected <a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstCell)</code>
 <div class="block">Returns a subset of the segment cell set, which starts with the given cell</div>
 </td>
 </tr>
-<tr id="i36" class="altColor">
+<tr id="i35" class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#toString--">toString</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i37" class="rowColor">
+<tr id="i36" class="altColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
               boolean&nbsp;succ,
               boolean&nbsp;mslabUsed,
               <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</code>&nbsp;</td>
 </tr>
-<tr id="i38" class="altColor">
+<tr id="i37" class="rowColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
               boolean&nbsp;succ,
@@ -600,29 +592,13 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 </dl>
 </li>
 </ul>
-<a name="getScanner-long-long-">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>getScanner</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.125">getScanner</a>(long&nbsp;readPoint,
-                                  long&nbsp;order)</pre>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-<dl>
-<dt><span class="returnLabel">Returns:</span></dt>
-<dd>a scanner for the given read point</dd>
-</dl>
-</li>
-</ul>
-<a name="getScanners-long-long-">
+<a name="getScanners-long-">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>getScanners</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.129">getScanners</a>(long&nbsp;readPoint,
-                                         long&nbsp;order)</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.121">getScanners</a>(long&nbsp;readPoint)</pre>
 </li>
 </ul>
 <a name="isEmpty--">
@@ -631,7 +607,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>isEmpty</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.136">isEmpty</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.128">isEmpty</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>whether the segment has any cells</dd>
@@ -644,7 +620,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getCellsCount</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.143">getCellsCount</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.135">getCellsCount</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>number of cells in segment</dd>
@@ -657,7 +633,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>close</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.150">close</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.142">close</a>()</pre>
 <div class="block">Closing a segment before it is being discarded</div>
 </li>
 </ul>
@@ -667,7 +643,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>maybeCloneWithAllocator</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.169">maybeCloneWithAllocator</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.161">maybeCloneWithAllocator</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                                     boolean&nbsp;forceCloneOfBigCell)</pre>
 <div class="block">If the segment has a memory allocator the cell is being cloned to this space, and returned;
  otherwise the given cell is returned
@@ -688,7 +664,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getCellLength</h4>
-<pre>static&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.187">getCellLength</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
+<pre>static&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.179">getCellLength</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
 <div class="block">Get cell length after serialized in <a href="../../../../../org/apache/hadoop/hbase/KeyValue.html" title="class in org.apache.hadoop.hbase"><code>KeyValue</code></a></div>
 </li>
 </ul>
@@ -698,7 +674,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>shouldSeek</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.191">shouldSeek</a>(<a href="../../../../../org/apache/hadoop/hbase/io/TimeRange.html" title="class in org.apache.hadoop.hbase.io">TimeRange</a>&nbsp;tr,
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.183">shouldSeek</a>(<a href="../../../../../org/apache/hadoop/hbase/io/TimeRange.html" title="class in org.apache.hadoop.hbase.io">TimeRange</a>&nbsp;tr,
                           long&nbsp;oldestUnexpiredTS)</pre>
 </li>
 </ul>
@@ -708,7 +684,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>isTagsPresent</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.197">isTagsPresent</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.189">isTagsPresent</a>()</pre>
 </li>
 </ul>
 <a name="incScannerCount--">
@@ -717,7 +693,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>incScannerCount</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.201">incScannerCount</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.193">incScannerCount</a>()</pre>
 </li>
 </ul>
 <a name="decScannerCount--">
@@ -726,7 +702,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>decScannerCount</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.207">decScannerCount</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.199">decScannerCount</a>()</pre>
 </li>
 </ul>
 <a name="setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">
@@ -735,7 +711,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>setCellSet</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.219">setCellSet</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetOld,
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.211">setCellSet</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetOld,
                              <a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetNew)</pre>
 <div class="block">Setting the CellSet of the segment - used only for flat immutable segment for setting
  immutable CellSet after its creation in immutable segment constructor</div>
@@ -751,7 +727,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getMemStoreSize</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSize.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSize</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.224">getMemStoreSize</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSize.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSize</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.216">getMemStoreSize</a>()</pre>
 </li>
 </ul>
 <a name="keySize--">
@@ -760,7 +736,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>keySize</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.231">keySize</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.223">keySize</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>Sum of all cell's size.</dd>
@@ -773,7 +749,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>heapSize</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.238">heapSize</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.230">heapSize</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>The heap size of this segment.</dd>
@@ -786,7 +762,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>offHeapSize</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.245">offHeapSize</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.237">offHeapSize</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>The off-heap size of this segment.</dd>
@@ -799,7 +775,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>incSize</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.253">incSize</a>(long&nbsp;delta,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.245">incSize</a>(long&nbsp;delta,
                        long&nbsp;heapOverhead,
                        long&nbsp;offHeapOverhead)</pre>
 <div class="block">Updates the size counters of the segment by the given delta</div>
@@ -811,7 +787,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getMinSequenceId</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.259">getMinSequenceId</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.251">getMinSequenceId</a>()</pre>
 </li>
 </ul>
 <a name="getTimeRangeTracker--">
@@ -820,7 +796,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getTimeRangeTracker</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.html" title="class in org.apache.hadoop.hbase.regionserver">TimeRangeTracker</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.263">getTimeRangeTracker</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.html" title="class in org.apache.hadoop.hbase.regionserver">TimeRangeTracker</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.255">getTimeRangeTracker</a>()</pre>
 </li>
 </ul>
 <a name="last--">
@@ -829,7 +805,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>last</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.268">last</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.260">last</a>()</pre>
 </li>
 </ul>
 <a name="iterator--">
@@ -838,7 +814,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>iterator</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.272">iterator</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.264">iterator</a>()</pre>
 </li>
 </ul>
 <a name="headSet-org.apache.hadoop.hbase.Cell-">
@@ -847,7 +823,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>headSet</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.276">headSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstKeyOnRow)</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.268">headSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstKeyOnRow)</pre>
 </li>
 </ul>
 <a name="compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">
@@ -856,7 +832,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>compare</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.280">compare</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.272">compare</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
                    <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;right)</pre>
 </li>
 </ul>
@@ -866,7 +842,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>compareRows</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.284">compareRows</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.276">compareRows</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
                        <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;right)</pre>
 </li>
 </ul>
@@ -876,7 +852,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getCellSet</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.291">getCellSet</a>()</pre>
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.283">getCellSet</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>a set of all cells in the segment</dd>
@@ -889,7 +865,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getComparator</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/CellComparator.html" title="interface in org.apache.hadoop.hbase">CellComparator</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.299">getComparator</a>()</pre>
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/CellComparator.html" title="interface in org.apache.hadoop.hbase">CellComparator</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.291">getComparator</a>()</pre>
 <div class="block">Returns the Cell comparator used by this segment</div>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
@@ -903,7 +879,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>internalAdd</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.303">internalAdd</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.295">internalAdd</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                            boolean&nbsp;mslabUsed,
                            <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 </li>
@@ -914,7 +890,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>updateMetaInfo</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.308">updateMetaInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.300">updateMetaInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
                               boolean&nbsp;succ,
                               boolean&nbsp;mslabUsed,
                               <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
@@ -926,7 +902,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>updateMetaInfo</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.334">updateMetaInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.326">updateMetaInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
                               boolean&nbsp;succ,
                               <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 </li>
@@ -937,7 +913,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>heapSizeChange</h4>
-<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.342">heapSizeChange</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.334">heapSizeChange</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                               boolean&nbsp;succ)</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
@@ -952,7 +928,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>offHeapSizeChange</h4>
-<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.359">offHeapSizeChange</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.351">offHeapSizeChange</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                                  boolean&nbsp;succ)</pre>
 </li>
 </ul>
@@ -962,7 +938,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>indexEntryOnHeapSize</h4>
-<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.376">indexEntryOnHeapSize</a>(boolean&nbsp;onHeap)</pre>
+<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.368">indexEntryOnHeapSize</a>(boolean&nbsp;onHeap)</pre>
 </li>
 </ul>
 <a name="indexEntryOffHeapSize-boolean-">
@@ -971,7 +947,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>indexEntryOffHeapSize</h4>
-<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.382">indexEntryOffHeapSize</a>(boolean&nbsp;offHeap)</pre>
+<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.374">indexEntryOffHeapSize</a>(boolean&nbsp;offHeap)</pre>
 </li>
 </ul>
 <a name="indexEntrySize--">
@@ -980,7 +956,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>indexEntrySize</h4>
-<pre>protected abstract&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.388">indexEntrySize</a>()</pre>
+<pre>protected abstract&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.380">indexEntrySize</a>()</pre>
 </li>
 </ul>
 <a name="tailSet-org.apache.hadoop.hbase.Cell-">
@@ -989,7 +965,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>tailSet</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.395">tailSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstCell)</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.387">tailSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstCell)</pre>
 <div class="block">Returns a subset of the segment cell set, which starts with the given cell</div>
 <dl>
 <dt><span class="paramLabel">Parameters:</span></dt>
@@ -1005,7 +981,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getMemStoreLAB</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreLAB.html" title="interface in org.apache.hadoop.hbase.regionserver">MemStoreLAB</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.400">getMemStoreLAB</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreLAB.html" title="interface in org.apache.hadoop.hbase.regionserver">MemStoreLAB</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.392">getMemStoreLAB</a>()</pre>
 </li>
 </ul>
 <a name="dump-org.slf4j.Logger-">
@@ -1014,7 +990,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>dump</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.408">dump</a>(org.slf4j.Logger&nbsp;log)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.400">dump</a>(org.slf4j.Logger&nbsp;log)</pre>
 <div class="block">Dumps all cells of the segment into the given log</div>
 </li>
 </ul>
@@ -1024,7 +1000,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.415">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/Segment.html#line.407">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>


[16/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed to create lock file " + 

<TRUNCATED>

[22/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
index df992af..7e921cb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
@@ -43,375 +43,357 @@
 <span class="sourceLineNo">035</span>@InterfaceAudience.Private<a name="line.35"></a>
 <span class="sourceLineNo">036</span>public class SegmentScanner implements KeyValueScanner {<a name="line.36"></a>
 <span class="sourceLineNo">037</span><a name="line.37"></a>
-<span class="sourceLineNo">038</span>  /**<a name="line.38"></a>
-<span class="sourceLineNo">039</span>   * Order of this scanner relative to other scanners. See<a name="line.39"></a>
-<span class="sourceLineNo">040</span>   * {@link KeyValueScanner#getScannerOrder()}.<a name="line.40"></a>
-<span class="sourceLineNo">041</span>   */<a name="line.41"></a>
-<span class="sourceLineNo">042</span>  private long scannerOrder;<a name="line.42"></a>
-<span class="sourceLineNo">043</span>  private static final long DEFAULT_SCANNER_ORDER = Long.MAX_VALUE;<a name="line.43"></a>
-<span class="sourceLineNo">044</span><a name="line.44"></a>
-<span class="sourceLineNo">045</span>  // the observed structure<a name="line.45"></a>
-<span class="sourceLineNo">046</span>  protected final Segment segment;<a name="line.46"></a>
-<span class="sourceLineNo">047</span>  // the highest relevant MVCC<a name="line.47"></a>
-<span class="sourceLineNo">048</span>  private long readPoint;<a name="line.48"></a>
-<span class="sourceLineNo">049</span>  // the current iterator that can be reinitialized by<a name="line.49"></a>
-<span class="sourceLineNo">050</span>  // seek(), backwardSeek(), or reseek()<a name="line.50"></a>
-<span class="sourceLineNo">051</span>  protected Iterator&lt;Cell&gt; iter;<a name="line.51"></a>
-<span class="sourceLineNo">052</span>  // the pre-calculated cell to be returned by peek()<a name="line.52"></a>
-<span class="sourceLineNo">053</span>  protected Cell current = null;<a name="line.53"></a>
-<span class="sourceLineNo">054</span>  // or next()<a name="line.54"></a>
-<span class="sourceLineNo">055</span>  // A flag represents whether could stop skipping KeyValues for MVCC<a name="line.55"></a>
-<span class="sourceLineNo">056</span>  // if have encountered the next row. Only used for reversed scan<a name="line.56"></a>
-<span class="sourceLineNo">057</span>  private boolean stopSkippingKVsIfNextRow = false;<a name="line.57"></a>
-<span class="sourceLineNo">058</span>  // last iterated KVs by seek (to restore the iterator state after reseek)<a name="line.58"></a>
-<span class="sourceLineNo">059</span>  private Cell last = null;<a name="line.59"></a>
-<span class="sourceLineNo">060</span><a name="line.60"></a>
-<span class="sourceLineNo">061</span>  // flag to indicate if this scanner is closed<a name="line.61"></a>
-<span class="sourceLineNo">062</span>  protected boolean closed = false;<a name="line.62"></a>
-<span class="sourceLineNo">063</span><a name="line.63"></a>
-<span class="sourceLineNo">064</span>  protected SegmentScanner(Segment segment, long readPoint) {<a name="line.64"></a>
-<span class="sourceLineNo">065</span>    this(segment, readPoint, DEFAULT_SCANNER_ORDER);<a name="line.65"></a>
-<span class="sourceLineNo">066</span>  }<a name="line.66"></a>
-<span class="sourceLineNo">067</span><a name="line.67"></a>
-<span class="sourceLineNo">068</span>  /**<a name="line.68"></a>
-<span class="sourceLineNo">069</span>   * @param scannerOrder see {@link KeyValueScanner#getScannerOrder()}.<a name="line.69"></a>
-<span class="sourceLineNo">070</span>   * Scanners are ordered from 0 (oldest) to newest in increasing order.<a name="line.70"></a>
-<span class="sourceLineNo">071</span>   */<a name="line.71"></a>
-<span class="sourceLineNo">072</span>  protected SegmentScanner(Segment segment, long readPoint, long scannerOrder) {<a name="line.72"></a>
-<span class="sourceLineNo">073</span>    this.segment = segment;<a name="line.73"></a>
-<span class="sourceLineNo">074</span>    this.readPoint = readPoint;<a name="line.74"></a>
-<span class="sourceLineNo">075</span>    //increase the reference count so the underlying structure will not be de-allocated<a name="line.75"></a>
-<span class="sourceLineNo">076</span>    this.segment.incScannerCount();<a name="line.76"></a>
-<span class="sourceLineNo">077</span>    iter = segment.iterator();<a name="line.77"></a>
-<span class="sourceLineNo">078</span>    // the initialization of the current is required for working with heap of SegmentScanners<a name="line.78"></a>
-<span class="sourceLineNo">079</span>    updateCurrent();<a name="line.79"></a>
-<span class="sourceLineNo">080</span>    this.scannerOrder = scannerOrder;<a name="line.80"></a>
-<span class="sourceLineNo">081</span>    if (current == null) {<a name="line.81"></a>
-<span class="sourceLineNo">082</span>      // nothing to fetch from this scanner<a name="line.82"></a>
-<span class="sourceLineNo">083</span>      close();<a name="line.83"></a>
+<span class="sourceLineNo">038</span><a name="line.38"></a>
+<span class="sourceLineNo">039</span>  // the observed structure<a name="line.39"></a>
+<span class="sourceLineNo">040</span>  protected final Segment segment;<a name="line.40"></a>
+<span class="sourceLineNo">041</span>  // the highest relevant MVCC<a name="line.41"></a>
+<span class="sourceLineNo">042</span>  private long readPoint;<a name="line.42"></a>
+<span class="sourceLineNo">043</span>  // the current iterator that can be reinitialized by<a name="line.43"></a>
+<span class="sourceLineNo">044</span>  // seek(), backwardSeek(), or reseek()<a name="line.44"></a>
+<span class="sourceLineNo">045</span>  protected Iterator&lt;Cell&gt; iter;<a name="line.45"></a>
+<span class="sourceLineNo">046</span>  // the pre-calculated cell to be returned by peek()<a name="line.46"></a>
+<span class="sourceLineNo">047</span>  protected Cell current = null;<a name="line.47"></a>
+<span class="sourceLineNo">048</span>  // or next()<a name="line.48"></a>
+<span class="sourceLineNo">049</span>  // A flag represents whether could stop skipping KeyValues for MVCC<a name="line.49"></a>
+<span class="sourceLineNo">050</span>  // if have encountered the next row. Only used for reversed scan<a name="line.50"></a>
+<span class="sourceLineNo">051</span>  private boolean stopSkippingKVsIfNextRow = false;<a name="line.51"></a>
+<span class="sourceLineNo">052</span>  // last iterated KVs by seek (to restore the iterator state after reseek)<a name="line.52"></a>
+<span class="sourceLineNo">053</span>  private Cell last = null;<a name="line.53"></a>
+<span class="sourceLineNo">054</span><a name="line.54"></a>
+<span class="sourceLineNo">055</span>  // flag to indicate if this scanner is closed<a name="line.55"></a>
+<span class="sourceLineNo">056</span>  protected boolean closed = false;<a name="line.56"></a>
+<span class="sourceLineNo">057</span><a name="line.57"></a>
+<span class="sourceLineNo">058</span><a name="line.58"></a>
+<span class="sourceLineNo">059</span>  /**<a name="line.59"></a>
+<span class="sourceLineNo">060</span>   * Scanners are ordered from 0 (oldest) to newest in increasing order.<a name="line.60"></a>
+<span class="sourceLineNo">061</span>   */<a name="line.61"></a>
+<span class="sourceLineNo">062</span>  protected SegmentScanner(Segment segment, long readPoint) {<a name="line.62"></a>
+<span class="sourceLineNo">063</span>    this.segment = segment;<a name="line.63"></a>
+<span class="sourceLineNo">064</span>    this.readPoint = readPoint;<a name="line.64"></a>
+<span class="sourceLineNo">065</span>    //increase the reference count so the underlying structure will not be de-allocated<a name="line.65"></a>
+<span class="sourceLineNo">066</span>    this.segment.incScannerCount();<a name="line.66"></a>
+<span class="sourceLineNo">067</span>    iter = segment.iterator();<a name="line.67"></a>
+<span class="sourceLineNo">068</span>    // the initialization of the current is required for working with heap of SegmentScanners<a name="line.68"></a>
+<span class="sourceLineNo">069</span>    updateCurrent();<a name="line.69"></a>
+<span class="sourceLineNo">070</span>    if (current == null) {<a name="line.70"></a>
+<span class="sourceLineNo">071</span>      // nothing to fetch from this scanner<a name="line.71"></a>
+<span class="sourceLineNo">072</span>      close();<a name="line.72"></a>
+<span class="sourceLineNo">073</span>    }<a name="line.73"></a>
+<span class="sourceLineNo">074</span>  }<a name="line.74"></a>
+<span class="sourceLineNo">075</span><a name="line.75"></a>
+<span class="sourceLineNo">076</span>  /**<a name="line.76"></a>
+<span class="sourceLineNo">077</span>   * Look at the next Cell in this scanner, but do not iterate the scanner<a name="line.77"></a>
+<span class="sourceLineNo">078</span>   * @return the currently observed Cell<a name="line.78"></a>
+<span class="sourceLineNo">079</span>   */<a name="line.79"></a>
+<span class="sourceLineNo">080</span>  @Override<a name="line.80"></a>
+<span class="sourceLineNo">081</span>  public Cell peek() {          // sanity check, the current should be always valid<a name="line.81"></a>
+<span class="sourceLineNo">082</span>    if (closed) {<a name="line.82"></a>
+<span class="sourceLineNo">083</span>      return null;<a name="line.83"></a>
 <span class="sourceLineNo">084</span>    }<a name="line.84"></a>
-<span class="sourceLineNo">085</span>  }<a name="line.85"></a>
-<span class="sourceLineNo">086</span><a name="line.86"></a>
-<span class="sourceLineNo">087</span>  /**<a name="line.87"></a>
-<span class="sourceLineNo">088</span>   * Look at the next Cell in this scanner, but do not iterate the scanner<a name="line.88"></a>
-<span class="sourceLineNo">089</span>   * @return the currently observed Cell<a name="line.89"></a>
-<span class="sourceLineNo">090</span>   */<a name="line.90"></a>
-<span class="sourceLineNo">091</span>  @Override<a name="line.91"></a>
-<span class="sourceLineNo">092</span>  public Cell peek() {          // sanity check, the current should be always valid<a name="line.92"></a>
-<span class="sourceLineNo">093</span>    if (closed) {<a name="line.93"></a>
-<span class="sourceLineNo">094</span>      return null;<a name="line.94"></a>
-<span class="sourceLineNo">095</span>    }<a name="line.95"></a>
-<span class="sourceLineNo">096</span>    if (current!=null &amp;&amp; current.getSequenceId() &gt; readPoint) {<a name="line.96"></a>
-<span class="sourceLineNo">097</span>      throw new RuntimeException("current is invalid: read point is "+readPoint+", " +<a name="line.97"></a>
-<span class="sourceLineNo">098</span>          "while current sequence id is " +current.getSequenceId());<a name="line.98"></a>
-<span class="sourceLineNo">099</span>    }<a name="line.99"></a>
-<span class="sourceLineNo">100</span>    return current;<a name="line.100"></a>
-<span class="sourceLineNo">101</span>  }<a name="line.101"></a>
-<span class="sourceLineNo">102</span><a name="line.102"></a>
-<span class="sourceLineNo">103</span>  /**<a name="line.103"></a>
-<span class="sourceLineNo">104</span>   * Return the next Cell in this scanner, iterating the scanner<a name="line.104"></a>
-<span class="sourceLineNo">105</span>   * @return the next Cell or null if end of scanner<a name="line.105"></a>
-<span class="sourceLineNo">106</span>   */<a name="line.106"></a>
-<span class="sourceLineNo">107</span>  @Override<a name="line.107"></a>
-<span class="sourceLineNo">108</span>  public Cell next() throws IOException {<a name="line.108"></a>
-<span class="sourceLineNo">109</span>    if (closed) {<a name="line.109"></a>
-<span class="sourceLineNo">110</span>      return null;<a name="line.110"></a>
-<span class="sourceLineNo">111</span>    }<a name="line.111"></a>
-<span class="sourceLineNo">112</span>    Cell oldCurrent = current;<a name="line.112"></a>
-<span class="sourceLineNo">113</span>    updateCurrent();                  // update the currently observed Cell<a name="line.113"></a>
-<span class="sourceLineNo">114</span>    return oldCurrent;<a name="line.114"></a>
-<span class="sourceLineNo">115</span>  }<a name="line.115"></a>
-<span class="sourceLineNo">116</span><a name="line.116"></a>
-<span class="sourceLineNo">117</span>  /**<a name="line.117"></a>
-<span class="sourceLineNo">118</span>   * Seek the scanner at or after the specified Cell.<a name="line.118"></a>
-<span class="sourceLineNo">119</span>   * @param cell seek value<a name="line.119"></a>
-<span class="sourceLineNo">120</span>   * @return true if scanner has values left, false if end of scanner<a name="line.120"></a>
-<span class="sourceLineNo">121</span>   */<a name="line.121"></a>
-<span class="sourceLineNo">122</span>  @Override<a name="line.122"></a>
-<span class="sourceLineNo">123</span>  public boolean seek(Cell cell) throws IOException {<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    if (closed) {<a name="line.124"></a>
-<span class="sourceLineNo">125</span>      return false;<a name="line.125"></a>
-<span class="sourceLineNo">126</span>    }<a name="line.126"></a>
-<span class="sourceLineNo">127</span>    if(cell == null) {<a name="line.127"></a>
-<span class="sourceLineNo">128</span>      close();<a name="line.128"></a>
-<span class="sourceLineNo">129</span>      return false;<a name="line.129"></a>
-<span class="sourceLineNo">130</span>    }<a name="line.130"></a>
-<span class="sourceLineNo">131</span>    // restart the iterator from new key<a name="line.131"></a>
-<span class="sourceLineNo">132</span>    iter = getIterator(cell);<a name="line.132"></a>
-<span class="sourceLineNo">133</span>    // last is going to be reinitialized in the next getNext() call<a name="line.133"></a>
-<span class="sourceLineNo">134</span>    last = null;<a name="line.134"></a>
-<span class="sourceLineNo">135</span>    updateCurrent();<a name="line.135"></a>
-<span class="sourceLineNo">136</span>    return (current != null);<a name="line.136"></a>
-<span class="sourceLineNo">137</span>  }<a name="line.137"></a>
-<span class="sourceLineNo">138</span><a name="line.138"></a>
-<span class="sourceLineNo">139</span>  protected Iterator&lt;Cell&gt; getIterator(Cell cell) {<a name="line.139"></a>
-<span class="sourceLineNo">140</span>    return segment.tailSet(cell).iterator();<a name="line.140"></a>
-<span class="sourceLineNo">141</span>  }<a name="line.141"></a>
-<span class="sourceLineNo">142</span><a name="line.142"></a>
-<span class="sourceLineNo">143</span>  /**<a name="line.143"></a>
-<span class="sourceLineNo">144</span>   * Reseek the scanner at or after the specified KeyValue.<a name="line.144"></a>
-<span class="sourceLineNo">145</span>   * This method is guaranteed to seek at or after the required key only if the<a name="line.145"></a>
-<span class="sourceLineNo">146</span>   * key comes after the current position of the scanner. Should not be used<a name="line.146"></a>
-<span class="sourceLineNo">147</span>   * to seek to a key which may come before the current position.<a name="line.147"></a>
-<span class="sourceLineNo">148</span>   *<a name="line.148"></a>
-<span class="sourceLineNo">149</span>   * @param cell seek value (should be non-null)<a name="line.149"></a>
-<span class="sourceLineNo">150</span>   * @return true if scanner has values left, false if end of scanner<a name="line.150"></a>
-<span class="sourceLineNo">151</span>   */<a name="line.151"></a>
-<span class="sourceLineNo">152</span>  @Override<a name="line.152"></a>
-<span class="sourceLineNo">153</span>  public boolean reseek(Cell cell) throws IOException {<a name="line.153"></a>
-<span class="sourceLineNo">154</span>    if (closed) {<a name="line.154"></a>
-<span class="sourceLineNo">155</span>      return false;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>    }<a name="line.156"></a>
-<span class="sourceLineNo">157</span>    /*<a name="line.157"></a>
-<span class="sourceLineNo">158</span>    See HBASE-4195 &amp; HBASE-3855 &amp; HBASE-6591 for the background on this implementation.<a name="line.158"></a>
-<span class="sourceLineNo">159</span>    This code is executed concurrently with flush and puts, without locks.<a name="line.159"></a>
-<span class="sourceLineNo">160</span>    The ideal implementation for performance would use the sub skip list implicitly<a name="line.160"></a>
-<span class="sourceLineNo">161</span>    pointed by the iterator. Unfortunately the Java API does not offer a method to<a name="line.161"></a>
-<span class="sourceLineNo">162</span>    get it. So we remember the last keys we iterated to and restore<a name="line.162"></a>
-<span class="sourceLineNo">163</span>    the reseeked set to at least that point.<a name="line.163"></a>
-<span class="sourceLineNo">164</span>    */<a name="line.164"></a>
-<span class="sourceLineNo">165</span>    iter = getIterator(getHighest(cell, last));<a name="line.165"></a>
-<span class="sourceLineNo">166</span>    updateCurrent();<a name="line.166"></a>
-<span class="sourceLineNo">167</span>    return (current != null);<a name="line.167"></a>
-<span class="sourceLineNo">168</span>  }<a name="line.168"></a>
-<span class="sourceLineNo">169</span><a name="line.169"></a>
-<span class="sourceLineNo">170</span>  /**<a name="line.170"></a>
-<span class="sourceLineNo">171</span>   * Seek the scanner at or before the row of specified Cell, it firstly<a name="line.171"></a>
-<span class="sourceLineNo">172</span>   * tries to seek the scanner at or after the specified Cell, return if<a name="line.172"></a>
-<span class="sourceLineNo">173</span>   * peek KeyValue of scanner has the same row with specified Cell,<a name="line.173"></a>
-<span class="sourceLineNo">174</span>   * otherwise seek the scanner at the first Cell of the row which is the<a name="line.174"></a>
-<span class="sourceLineNo">175</span>   * previous row of specified KeyValue<a name="line.175"></a>
-<span class="sourceLineNo">176</span>   *<a name="line.176"></a>
-<span class="sourceLineNo">177</span>   * @param key seek Cell<a name="line.177"></a>
-<span class="sourceLineNo">178</span>   * @return true if the scanner is at the valid KeyValue, false if such Cell does not exist<a name="line.178"></a>
-<span class="sourceLineNo">179</span>   */<a name="line.179"></a>
-<span class="sourceLineNo">180</span>  @Override<a name="line.180"></a>
-<span class="sourceLineNo">181</span>  public boolean backwardSeek(Cell key) throws IOException {<a name="line.181"></a>
-<span class="sourceLineNo">182</span>    if (closed) {<a name="line.182"></a>
-<span class="sourceLineNo">183</span>      return false;<a name="line.183"></a>
-<span class="sourceLineNo">184</span>    }<a name="line.184"></a>
-<span class="sourceLineNo">185</span>    seek(key);    // seek forward then go backward<a name="line.185"></a>
-<span class="sourceLineNo">186</span>    if (peek() == null || segment.compareRows(peek(), key) &gt; 0) {<a name="line.186"></a>
-<span class="sourceLineNo">187</span>      return seekToPreviousRow(key);<a name="line.187"></a>
-<span class="sourceLineNo">188</span>    }<a name="line.188"></a>
-<span class="sourceLineNo">189</span>    return true;<a name="line.189"></a>
-<span class="sourceLineNo">190</span>  }<a name="line.190"></a>
-<span class="sourceLineNo">191</span><a name="line.191"></a>
-<span class="sourceLineNo">192</span>  /**<a name="line.192"></a>
-<span class="sourceLineNo">193</span>   * Seek the scanner at the first Cell of the row which is the previous row<a name="line.193"></a>
-<span class="sourceLineNo">194</span>   * of specified key<a name="line.194"></a>
-<span class="sourceLineNo">195</span>   *<a name="line.195"></a>
-<span class="sourceLineNo">196</span>   * @param cell seek value<a name="line.196"></a>
-<span class="sourceLineNo">197</span>   * @return true if the scanner at the first valid Cell of previous row,<a name="line.197"></a>
-<span class="sourceLineNo">198</span>   *     false if not existing such Cell<a name="line.198"></a>
-<span class="sourceLineNo">199</span>   */<a name="line.199"></a>
-<span class="sourceLineNo">200</span>  @Override<a name="line.200"></a>
-<span class="sourceLineNo">201</span>  public boolean seekToPreviousRow(Cell cell) throws IOException {<a name="line.201"></a>
-<span class="sourceLineNo">202</span>    if (closed) {<a name="line.202"></a>
-<span class="sourceLineNo">203</span>      return false;<a name="line.203"></a>
-<span class="sourceLineNo">204</span>    }<a name="line.204"></a>
-<span class="sourceLineNo">205</span>    boolean keepSeeking;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>    Cell key = cell;<a name="line.206"></a>
-<span class="sourceLineNo">207</span>    do {<a name="line.207"></a>
-<span class="sourceLineNo">208</span>      Cell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key);<a name="line.208"></a>
-<span class="sourceLineNo">209</span>      SortedSet&lt;Cell&gt; cellHead = segment.headSet(firstKeyOnRow);<a name="line.209"></a>
-<span class="sourceLineNo">210</span>      Cell lastCellBeforeRow = cellHead.isEmpty() ? null : cellHead.last();<a name="line.210"></a>
-<span class="sourceLineNo">211</span>      if (lastCellBeforeRow == null) {<a name="line.211"></a>
-<span class="sourceLineNo">212</span>        current = null;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>        return false;<a name="line.213"></a>
-<span class="sourceLineNo">214</span>      }<a name="line.214"></a>
-<span class="sourceLineNo">215</span>      Cell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow);<a name="line.215"></a>
-<span class="sourceLineNo">216</span>      this.stopSkippingKVsIfNextRow = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>      seek(firstKeyOnPreviousRow);<a name="line.217"></a>
-<span class="sourceLineNo">218</span>      this.stopSkippingKVsIfNextRow = false;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>      if (peek() == null<a name="line.219"></a>
-<span class="sourceLineNo">220</span>          || segment.getComparator().compareRows(peek(), firstKeyOnPreviousRow) &gt; 0) {<a name="line.220"></a>
-<span class="sourceLineNo">221</span>        keepSeeking = true;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>        key = firstKeyOnPreviousRow;<a name="line.222"></a>
-<span class="sourceLineNo">223</span>        continue;<a name="line.223"></a>
-<span class="sourceLineNo">224</span>      } else {<a name="line.224"></a>
-<span class="sourceLineNo">225</span>        keepSeeking = false;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>      }<a name="line.226"></a>
-<span class="sourceLineNo">227</span>    } while (keepSeeking);<a name="line.227"></a>
-<span class="sourceLineNo">228</span>    return true;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  }<a name="line.229"></a>
-<span class="sourceLineNo">230</span><a name="line.230"></a>
-<span class="sourceLineNo">231</span>  /**<a name="line.231"></a>
-<span class="sourceLineNo">232</span>   * Seek the scanner at the first KeyValue of last row<a name="line.232"></a>
-<span class="sourceLineNo">233</span>   *<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * @return true if scanner has values left, false if the underlying data is empty<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   */<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  @Override<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  public boolean seekToLastRow() throws IOException {<a name="line.237"></a>
-<span class="sourceLineNo">238</span>    if (closed) {<a name="line.238"></a>
-<span class="sourceLineNo">239</span>      return false;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>    }<a name="line.240"></a>
-<span class="sourceLineNo">241</span>    Cell higherCell = segment.isEmpty() ? null : segment.last();<a name="line.241"></a>
-<span class="sourceLineNo">242</span>    if (higherCell == null) {<a name="line.242"></a>
-<span class="sourceLineNo">243</span>      return false;<a name="line.243"></a>
-<span class="sourceLineNo">244</span>    }<a name="line.244"></a>
-<span class="sourceLineNo">245</span><a name="line.245"></a>
-<span class="sourceLineNo">246</span>    Cell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell);<a name="line.246"></a>
-<span class="sourceLineNo">247</span><a name="line.247"></a>
-<span class="sourceLineNo">248</span>    if (seek(firstCellOnLastRow)) {<a name="line.248"></a>
-<span class="sourceLineNo">249</span>      return true;<a name="line.249"></a>
-<span class="sourceLineNo">250</span>    } else {<a name="line.250"></a>
-<span class="sourceLineNo">251</span>      return seekToPreviousRow(higherCell);<a name="line.251"></a>
+<span class="sourceLineNo">085</span>    if (current!=null &amp;&amp; current.getSequenceId() &gt; readPoint) {<a name="line.85"></a>
+<span class="sourceLineNo">086</span>      throw new RuntimeException("current is invalid: read point is "+readPoint+", " +<a name="line.86"></a>
+<span class="sourceLineNo">087</span>          "while current sequence id is " +current.getSequenceId());<a name="line.87"></a>
+<span class="sourceLineNo">088</span>    }<a name="line.88"></a>
+<span class="sourceLineNo">089</span>    return current;<a name="line.89"></a>
+<span class="sourceLineNo">090</span>  }<a name="line.90"></a>
+<span class="sourceLineNo">091</span><a name="line.91"></a>
+<span class="sourceLineNo">092</span>  /**<a name="line.92"></a>
+<span class="sourceLineNo">093</span>   * Return the next Cell in this scanner, iterating the scanner<a name="line.93"></a>
+<span class="sourceLineNo">094</span>   * @return the next Cell or null if end of scanner<a name="line.94"></a>
+<span class="sourceLineNo">095</span>   */<a name="line.95"></a>
+<span class="sourceLineNo">096</span>  @Override<a name="line.96"></a>
+<span class="sourceLineNo">097</span>  public Cell next() throws IOException {<a name="line.97"></a>
+<span class="sourceLineNo">098</span>    if (closed) {<a name="line.98"></a>
+<span class="sourceLineNo">099</span>      return null;<a name="line.99"></a>
+<span class="sourceLineNo">100</span>    }<a name="line.100"></a>
+<span class="sourceLineNo">101</span>    Cell oldCurrent = current;<a name="line.101"></a>
+<span class="sourceLineNo">102</span>    updateCurrent();                  // update the currently observed Cell<a name="line.102"></a>
+<span class="sourceLineNo">103</span>    return oldCurrent;<a name="line.103"></a>
+<span class="sourceLineNo">104</span>  }<a name="line.104"></a>
+<span class="sourceLineNo">105</span><a name="line.105"></a>
+<span class="sourceLineNo">106</span>  /**<a name="line.106"></a>
+<span class="sourceLineNo">107</span>   * Seek the scanner at or after the specified Cell.<a name="line.107"></a>
+<span class="sourceLineNo">108</span>   * @param cell seek value<a name="line.108"></a>
+<span class="sourceLineNo">109</span>   * @return true if scanner has values left, false if end of scanner<a name="line.109"></a>
+<span class="sourceLineNo">110</span>   */<a name="line.110"></a>
+<span class="sourceLineNo">111</span>  @Override<a name="line.111"></a>
+<span class="sourceLineNo">112</span>  public boolean seek(Cell cell) throws IOException {<a name="line.112"></a>
+<span class="sourceLineNo">113</span>    if (closed) {<a name="line.113"></a>
+<span class="sourceLineNo">114</span>      return false;<a name="line.114"></a>
+<span class="sourceLineNo">115</span>    }<a name="line.115"></a>
+<span class="sourceLineNo">116</span>    if(cell == null) {<a name="line.116"></a>
+<span class="sourceLineNo">117</span>      close();<a name="line.117"></a>
+<span class="sourceLineNo">118</span>      return false;<a name="line.118"></a>
+<span class="sourceLineNo">119</span>    }<a name="line.119"></a>
+<span class="sourceLineNo">120</span>    // restart the iterator from new key<a name="line.120"></a>
+<span class="sourceLineNo">121</span>    iter = getIterator(cell);<a name="line.121"></a>
+<span class="sourceLineNo">122</span>    // last is going to be reinitialized in the next getNext() call<a name="line.122"></a>
+<span class="sourceLineNo">123</span>    last = null;<a name="line.123"></a>
+<span class="sourceLineNo">124</span>    updateCurrent();<a name="line.124"></a>
+<span class="sourceLineNo">125</span>    return (current != null);<a name="line.125"></a>
+<span class="sourceLineNo">126</span>  }<a name="line.126"></a>
+<span class="sourceLineNo">127</span><a name="line.127"></a>
+<span class="sourceLineNo">128</span>  protected Iterator&lt;Cell&gt; getIterator(Cell cell) {<a name="line.128"></a>
+<span class="sourceLineNo">129</span>    return segment.tailSet(cell).iterator();<a name="line.129"></a>
+<span class="sourceLineNo">130</span>  }<a name="line.130"></a>
+<span class="sourceLineNo">131</span><a name="line.131"></a>
+<span class="sourceLineNo">132</span>  /**<a name="line.132"></a>
+<span class="sourceLineNo">133</span>   * Reseek the scanner at or after the specified KeyValue.<a name="line.133"></a>
+<span class="sourceLineNo">134</span>   * This method is guaranteed to seek at or after the required key only if the<a name="line.134"></a>
+<span class="sourceLineNo">135</span>   * key comes after the current position of the scanner. Should not be used<a name="line.135"></a>
+<span class="sourceLineNo">136</span>   * to seek to a key which may come before the current position.<a name="line.136"></a>
+<span class="sourceLineNo">137</span>   *<a name="line.137"></a>
+<span class="sourceLineNo">138</span>   * @param cell seek value (should be non-null)<a name="line.138"></a>
+<span class="sourceLineNo">139</span>   * @return true if scanner has values left, false if end of scanner<a name="line.139"></a>
+<span class="sourceLineNo">140</span>   */<a name="line.140"></a>
+<span class="sourceLineNo">141</span>  @Override<a name="line.141"></a>
+<span class="sourceLineNo">142</span>  public boolean reseek(Cell cell) throws IOException {<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    if (closed) {<a name="line.143"></a>
+<span class="sourceLineNo">144</span>      return false;<a name="line.144"></a>
+<span class="sourceLineNo">145</span>    }<a name="line.145"></a>
+<span class="sourceLineNo">146</span>    /*<a name="line.146"></a>
+<span class="sourceLineNo">147</span>    See HBASE-4195 &amp; HBASE-3855 &amp; HBASE-6591 for the background on this implementation.<a name="line.147"></a>
+<span class="sourceLineNo">148</span>    This code is executed concurrently with flush and puts, without locks.<a name="line.148"></a>
+<span class="sourceLineNo">149</span>    The ideal implementation for performance would use the sub skip list implicitly<a name="line.149"></a>
+<span class="sourceLineNo">150</span>    pointed by the iterator. Unfortunately the Java API does not offer a method to<a name="line.150"></a>
+<span class="sourceLineNo">151</span>    get it. So we remember the last keys we iterated to and restore<a name="line.151"></a>
+<span class="sourceLineNo">152</span>    the reseeked set to at least that point.<a name="line.152"></a>
+<span class="sourceLineNo">153</span>    */<a name="line.153"></a>
+<span class="sourceLineNo">154</span>    iter = getIterator(getHighest(cell, last));<a name="line.154"></a>
+<span class="sourceLineNo">155</span>    updateCurrent();<a name="line.155"></a>
+<span class="sourceLineNo">156</span>    return (current != null);<a name="line.156"></a>
+<span class="sourceLineNo">157</span>  }<a name="line.157"></a>
+<span class="sourceLineNo">158</span><a name="line.158"></a>
+<span class="sourceLineNo">159</span>  /**<a name="line.159"></a>
+<span class="sourceLineNo">160</span>   * Seek the scanner at or before the row of specified Cell, it firstly<a name="line.160"></a>
+<span class="sourceLineNo">161</span>   * tries to seek the scanner at or after the specified Cell, return if<a name="line.161"></a>
+<span class="sourceLineNo">162</span>   * peek KeyValue of scanner has the same row with specified Cell,<a name="line.162"></a>
+<span class="sourceLineNo">163</span>   * otherwise seek the scanner at the first Cell of the row which is the<a name="line.163"></a>
+<span class="sourceLineNo">164</span>   * previous row of specified KeyValue<a name="line.164"></a>
+<span class="sourceLineNo">165</span>   *<a name="line.165"></a>
+<span class="sourceLineNo">166</span>   * @param key seek Cell<a name="line.166"></a>
+<span class="sourceLineNo">167</span>   * @return true if the scanner is at the valid KeyValue, false if such Cell does not exist<a name="line.167"></a>
+<span class="sourceLineNo">168</span>   */<a name="line.168"></a>
+<span class="sourceLineNo">169</span>  @Override<a name="line.169"></a>
+<span class="sourceLineNo">170</span>  public boolean backwardSeek(Cell key) throws IOException {<a name="line.170"></a>
+<span class="sourceLineNo">171</span>    if (closed) {<a name="line.171"></a>
+<span class="sourceLineNo">172</span>      return false;<a name="line.172"></a>
+<span class="sourceLineNo">173</span>    }<a name="line.173"></a>
+<span class="sourceLineNo">174</span>    seek(key);    // seek forward then go backward<a name="line.174"></a>
+<span class="sourceLineNo">175</span>    if (peek() == null || segment.compareRows(peek(), key) &gt; 0) {<a name="line.175"></a>
+<span class="sourceLineNo">176</span>      return seekToPreviousRow(key);<a name="line.176"></a>
+<span class="sourceLineNo">177</span>    }<a name="line.177"></a>
+<span class="sourceLineNo">178</span>    return true;<a name="line.178"></a>
+<span class="sourceLineNo">179</span>  }<a name="line.179"></a>
+<span class="sourceLineNo">180</span><a name="line.180"></a>
+<span class="sourceLineNo">181</span>  /**<a name="line.181"></a>
+<span class="sourceLineNo">182</span>   * Seek the scanner at the first Cell of the row which is the previous row<a name="line.182"></a>
+<span class="sourceLineNo">183</span>   * of specified key<a name="line.183"></a>
+<span class="sourceLineNo">184</span>   *<a name="line.184"></a>
+<span class="sourceLineNo">185</span>   * @param cell seek value<a name="line.185"></a>
+<span class="sourceLineNo">186</span>   * @return true if the scanner at the first valid Cell of previous row,<a name="line.186"></a>
+<span class="sourceLineNo">187</span>   *     false if not existing such Cell<a name="line.187"></a>
+<span class="sourceLineNo">188</span>   */<a name="line.188"></a>
+<span class="sourceLineNo">189</span>  @Override<a name="line.189"></a>
+<span class="sourceLineNo">190</span>  public boolean seekToPreviousRow(Cell cell) throws IOException {<a name="line.190"></a>
+<span class="sourceLineNo">191</span>    if (closed) {<a name="line.191"></a>
+<span class="sourceLineNo">192</span>      return false;<a name="line.192"></a>
+<span class="sourceLineNo">193</span>    }<a name="line.193"></a>
+<span class="sourceLineNo">194</span>    boolean keepSeeking;<a name="line.194"></a>
+<span class="sourceLineNo">195</span>    Cell key = cell;<a name="line.195"></a>
+<span class="sourceLineNo">196</span>    do {<a name="line.196"></a>
+<span class="sourceLineNo">197</span>      Cell firstKeyOnRow = PrivateCellUtil.createFirstOnRow(key);<a name="line.197"></a>
+<span class="sourceLineNo">198</span>      SortedSet&lt;Cell&gt; cellHead = segment.headSet(firstKeyOnRow);<a name="line.198"></a>
+<span class="sourceLineNo">199</span>      Cell lastCellBeforeRow = cellHead.isEmpty() ? null : cellHead.last();<a name="line.199"></a>
+<span class="sourceLineNo">200</span>      if (lastCellBeforeRow == null) {<a name="line.200"></a>
+<span class="sourceLineNo">201</span>        current = null;<a name="line.201"></a>
+<span class="sourceLineNo">202</span>        return false;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>      }<a name="line.203"></a>
+<span class="sourceLineNo">204</span>      Cell firstKeyOnPreviousRow = PrivateCellUtil.createFirstOnRow(lastCellBeforeRow);<a name="line.204"></a>
+<span class="sourceLineNo">205</span>      this.stopSkippingKVsIfNextRow = true;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>      seek(firstKeyOnPreviousRow);<a name="line.206"></a>
+<span class="sourceLineNo">207</span>      this.stopSkippingKVsIfNextRow = false;<a name="line.207"></a>
+<span class="sourceLineNo">208</span>      if (peek() == null<a name="line.208"></a>
+<span class="sourceLineNo">209</span>          || segment.getComparator().compareRows(peek(), firstKeyOnPreviousRow) &gt; 0) {<a name="line.209"></a>
+<span class="sourceLineNo">210</span>        keepSeeking = true;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>        key = firstKeyOnPreviousRow;<a name="line.211"></a>
+<span class="sourceLineNo">212</span>        continue;<a name="line.212"></a>
+<span class="sourceLineNo">213</span>      } else {<a name="line.213"></a>
+<span class="sourceLineNo">214</span>        keepSeeking = false;<a name="line.214"></a>
+<span class="sourceLineNo">215</span>      }<a name="line.215"></a>
+<span class="sourceLineNo">216</span>    } while (keepSeeking);<a name="line.216"></a>
+<span class="sourceLineNo">217</span>    return true;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  }<a name="line.218"></a>
+<span class="sourceLineNo">219</span><a name="line.219"></a>
+<span class="sourceLineNo">220</span>  /**<a name="line.220"></a>
+<span class="sourceLineNo">221</span>   * Seek the scanner at the first KeyValue of last row<a name="line.221"></a>
+<span class="sourceLineNo">222</span>   *<a name="line.222"></a>
+<span class="sourceLineNo">223</span>   * @return true if scanner has values left, false if the underlying data is empty<a name="line.223"></a>
+<span class="sourceLineNo">224</span>   */<a name="line.224"></a>
+<span class="sourceLineNo">225</span>  @Override<a name="line.225"></a>
+<span class="sourceLineNo">226</span>  public boolean seekToLastRow() throws IOException {<a name="line.226"></a>
+<span class="sourceLineNo">227</span>    if (closed) {<a name="line.227"></a>
+<span class="sourceLineNo">228</span>      return false;<a name="line.228"></a>
+<span class="sourceLineNo">229</span>    }<a name="line.229"></a>
+<span class="sourceLineNo">230</span>    Cell higherCell = segment.isEmpty() ? null : segment.last();<a name="line.230"></a>
+<span class="sourceLineNo">231</span>    if (higherCell == null) {<a name="line.231"></a>
+<span class="sourceLineNo">232</span>      return false;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>    }<a name="line.233"></a>
+<span class="sourceLineNo">234</span><a name="line.234"></a>
+<span class="sourceLineNo">235</span>    Cell firstCellOnLastRow = PrivateCellUtil.createFirstOnRow(higherCell);<a name="line.235"></a>
+<span class="sourceLineNo">236</span><a name="line.236"></a>
+<span class="sourceLineNo">237</span>    if (seek(firstCellOnLastRow)) {<a name="line.237"></a>
+<span class="sourceLineNo">238</span>      return true;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>    } else {<a name="line.239"></a>
+<span class="sourceLineNo">240</span>      return seekToPreviousRow(higherCell);<a name="line.240"></a>
+<span class="sourceLineNo">241</span>    }<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  }<a name="line.242"></a>
+<span class="sourceLineNo">243</span><a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  /**<a name="line.245"></a>
+<span class="sourceLineNo">246</span>   * Close the KeyValue scanner.<a name="line.246"></a>
+<span class="sourceLineNo">247</span>   */<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  @Override<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  public void close() {<a name="line.249"></a>
+<span class="sourceLineNo">250</span>    if (closed) {<a name="line.250"></a>
+<span class="sourceLineNo">251</span>      return;<a name="line.251"></a>
 <span class="sourceLineNo">252</span>    }<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  }<a name="line.253"></a>
-<span class="sourceLineNo">254</span><a name="line.254"></a>
-<span class="sourceLineNo">255</span>  /**<a name="line.255"></a>
-<span class="sourceLineNo">256</span>   * @see KeyValueScanner#getScannerOrder()<a name="line.256"></a>
-<span class="sourceLineNo">257</span>   */<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  @Override<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  public long getScannerOrder() {<a name="line.259"></a>
-<span class="sourceLineNo">260</span>    return scannerOrder;<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  }<a name="line.261"></a>
-<span class="sourceLineNo">262</span><a name="line.262"></a>
-<span class="sourceLineNo">263</span>  /**<a name="line.263"></a>
-<span class="sourceLineNo">264</span>   * Close the KeyValue scanner.<a name="line.264"></a>
-<span class="sourceLineNo">265</span>   */<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  @Override<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  public void close() {<a name="line.267"></a>
-<span class="sourceLineNo">268</span>    if (closed) {<a name="line.268"></a>
-<span class="sourceLineNo">269</span>      return;<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    }<a name="line.270"></a>
-<span class="sourceLineNo">271</span>    getSegment().decScannerCount();<a name="line.271"></a>
-<span class="sourceLineNo">272</span>    closed = true;<a name="line.272"></a>
+<span class="sourceLineNo">253</span>    getSegment().decScannerCount();<a name="line.253"></a>
+<span class="sourceLineNo">254</span>    closed = true;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  }<a name="line.255"></a>
+<span class="sourceLineNo">256</span><a name="line.256"></a>
+<span class="sourceLineNo">257</span>  /**<a name="line.257"></a>
+<span class="sourceLineNo">258</span>   * This functionality should be resolved in the higher level which is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>   * MemStoreScanner, currently returns true as default. Doesn't throw<a name="line.259"></a>
+<span class="sourceLineNo">260</span>   * IllegalStateException in order not to change the signature of the<a name="line.260"></a>
+<span class="sourceLineNo">261</span>   * overridden method<a name="line.261"></a>
+<span class="sourceLineNo">262</span>   */<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  @Override<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {<a name="line.264"></a>
+<span class="sourceLineNo">265</span>    return getSegment().shouldSeek(scan.getColumnFamilyTimeRange()<a name="line.265"></a>
+<span class="sourceLineNo">266</span>            .getOrDefault(store.getColumnFamilyDescriptor().getName(), scan.getTimeRange()), oldestUnexpiredTS);<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  }<a name="line.267"></a>
+<span class="sourceLineNo">268</span><a name="line.268"></a>
+<span class="sourceLineNo">269</span>  @Override<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  public boolean requestSeek(Cell c, boolean forward, boolean useBloom)<a name="line.270"></a>
+<span class="sourceLineNo">271</span>      throws IOException {<a name="line.271"></a>
+<span class="sourceLineNo">272</span>    return NonLazyKeyValueScanner.doRealSeek(this, c, forward);<a name="line.272"></a>
 <span class="sourceLineNo">273</span>  }<a name="line.273"></a>
 <span class="sourceLineNo">274</span><a name="line.274"></a>
 <span class="sourceLineNo">275</span>  /**<a name="line.275"></a>
-<span class="sourceLineNo">276</span>   * This functionality should be resolved in the higher level which is<a name="line.276"></a>
-<span class="sourceLineNo">277</span>   * MemStoreScanner, currently returns true as default. Doesn't throw<a name="line.277"></a>
-<span class="sourceLineNo">278</span>   * IllegalStateException in order not to change the signature of the<a name="line.278"></a>
-<span class="sourceLineNo">279</span>   * overridden method<a name="line.279"></a>
-<span class="sourceLineNo">280</span>   */<a name="line.280"></a>
-<span class="sourceLineNo">281</span>  @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>  public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>    return getSegment().shouldSeek(scan.getColumnFamilyTimeRange()<a name="line.283"></a>
-<span class="sourceLineNo">284</span>            .getOrDefault(store.getColumnFamilyDescriptor().getName(), scan.getTimeRange()), oldestUnexpiredTS);<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  }<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  @Override<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  public boolean requestSeek(Cell c, boolean forward, boolean useBloom)<a name="line.288"></a>
-<span class="sourceLineNo">289</span>      throws IOException {<a name="line.289"></a>
-<span class="sourceLineNo">290</span>    return NonLazyKeyValueScanner.doRealSeek(this, c, forward);<a name="line.290"></a>
-<span class="sourceLineNo">291</span>  }<a name="line.291"></a>
-<span class="sourceLineNo">292</span><a name="line.292"></a>
-<span class="sourceLineNo">293</span>  /**<a name="line.293"></a>
-<span class="sourceLineNo">294</span>   * This scanner is working solely on the in-memory MemStore and doesn't work on<a name="line.294"></a>
-<span class="sourceLineNo">295</span>   * store files, MutableCellSetSegmentScanner always does the seek,<a name="line.295"></a>
-<span class="sourceLineNo">296</span>   * therefore always returning true.<a name="line.296"></a>
+<span class="sourceLineNo">276</span>   * This scanner is working solely on the in-memory MemStore and doesn't work on<a name="line.276"></a>
+<span class="sourceLineNo">277</span>   * store files, MutableCellSetSegmentScanner always does the seek,<a name="line.277"></a>
+<span class="sourceLineNo">278</span>   * therefore always returning true.<a name="line.278"></a>
+<span class="sourceLineNo">279</span>   */<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  @Override<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  public boolean realSeekDone() {<a name="line.281"></a>
+<span class="sourceLineNo">282</span>    return true;<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  }<a name="line.283"></a>
+<span class="sourceLineNo">284</span><a name="line.284"></a>
+<span class="sourceLineNo">285</span>  /**<a name="line.285"></a>
+<span class="sourceLineNo">286</span>   * This function should be never called on scanners that always do real seek operations (i.e. most<a name="line.286"></a>
+<span class="sourceLineNo">287</span>   * of the scanners and also this one). The easiest way to achieve this is to call<a name="line.287"></a>
+<span class="sourceLineNo">288</span>   * {@link #realSeekDone()} first.<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   */<a name="line.289"></a>
+<span class="sourceLineNo">290</span>  @Override<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  public void enforceSeek() throws IOException {<a name="line.291"></a>
+<span class="sourceLineNo">292</span>    throw new NotImplementedException("enforceSeek cannot be called on a SegmentScanner");<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  }<a name="line.293"></a>
+<span class="sourceLineNo">294</span><a name="line.294"></a>
+<span class="sourceLineNo">295</span>  /**<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * @return true if this is a file scanner. Otherwise a memory scanner is assumed.<a name="line.296"></a>
 <span class="sourceLineNo">297</span>   */<a name="line.297"></a>
 <span class="sourceLineNo">298</span>  @Override<a name="line.298"></a>
-<span class="sourceLineNo">299</span>  public boolean realSeekDone() {<a name="line.299"></a>
-<span class="sourceLineNo">300</span>    return true;<a name="line.300"></a>
+<span class="sourceLineNo">299</span>  public boolean isFileScanner() {<a name="line.299"></a>
+<span class="sourceLineNo">300</span>    return false;<a name="line.300"></a>
 <span class="sourceLineNo">301</span>  }<a name="line.301"></a>
 <span class="sourceLineNo">302</span><a name="line.302"></a>
-<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * This function should be never called on scanners that always do real seek operations (i.e. most<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * of the scanners and also this one). The easiest way to achieve this is to call<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * {@link #realSeekDone()} first.<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   */<a name="line.307"></a>
-<span class="sourceLineNo">308</span>  @Override<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  public void enforceSeek() throws IOException {<a name="line.309"></a>
-<span class="sourceLineNo">310</span>    throw new NotImplementedException("enforceSeek cannot be called on a SegmentScanner");<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  }<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * @return true if this is a file scanner. Otherwise a memory scanner is assumed.<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   */<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  @Override<a name="line.316"></a>
-<span class="sourceLineNo">317</span>  public boolean isFileScanner() {<a name="line.317"></a>
-<span class="sourceLineNo">318</span>    return false;<a name="line.318"></a>
-<span class="sourceLineNo">319</span>  }<a name="line.319"></a>
-<span class="sourceLineNo">320</span><a name="line.320"></a>
-<span class="sourceLineNo">321</span>  @Override<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  public Path getFilePath() {<a name="line.322"></a>
-<span class="sourceLineNo">323</span>    return null;<a name="line.323"></a>
-<span class="sourceLineNo">324</span>  }<a name="line.324"></a>
-<span class="sourceLineNo">325</span><a name="line.325"></a>
-<span class="sourceLineNo">326</span>  /**<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @return the next key in the index (the key to seek to the next block)<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   *     if known, or null otherwise<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   *     Not relevant for in-memory scanner<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  @Override<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  public Cell getNextIndexedKey() {<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    return null;<a name="line.333"></a>
+<span class="sourceLineNo">303</span>  @Override<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  public Path getFilePath() {<a name="line.304"></a>
+<span class="sourceLineNo">305</span>    return null;<a name="line.305"></a>
+<span class="sourceLineNo">306</span>  }<a name="line.306"></a>
+<span class="sourceLineNo">307</span><a name="line.307"></a>
+<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * @return the next key in the index (the key to seek to the next block)<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   *     if known, or null otherwise<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   *     Not relevant for in-memory scanner<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  @Override<a name="line.313"></a>
+<span class="sourceLineNo">314</span>  public Cell getNextIndexedKey() {<a name="line.314"></a>
+<span class="sourceLineNo">315</span>    return null;<a name="line.315"></a>
+<span class="sourceLineNo">316</span>  }<a name="line.316"></a>
+<span class="sourceLineNo">317</span><a name="line.317"></a>
+<span class="sourceLineNo">318</span>  /**<a name="line.318"></a>
+<span class="sourceLineNo">319</span>   * Called after a batch of rows scanned (RPC) and set to be returned to client. Any in between<a name="line.319"></a>
+<span class="sourceLineNo">320</span>   * cleanup can be done here. Nothing to be done for MutableCellSetSegmentScanner.<a name="line.320"></a>
+<span class="sourceLineNo">321</span>   */<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  @Override<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  public void shipped() throws IOException {<a name="line.323"></a>
+<span class="sourceLineNo">324</span>    // do nothing<a name="line.324"></a>
+<span class="sourceLineNo">325</span>  }<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  //debug method<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  @Override<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  public String toString() {<a name="line.329"></a>
+<span class="sourceLineNo">330</span>    String res = "Store segment scanner of type "+this.getClass().getName()+"; ";<a name="line.330"></a>
+<span class="sourceLineNo">331</span>    res += "Scanner order " + getScannerOrder() + "; ";<a name="line.331"></a>
+<span class="sourceLineNo">332</span>    res += getSegment().toString();<a name="line.332"></a>
+<span class="sourceLineNo">333</span>    return res;<a name="line.333"></a>
 <span class="sourceLineNo">334</span>  }<a name="line.334"></a>
 <span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  /**<a name="line.336"></a>
-<span class="sourceLineNo">337</span>   * Called after a batch of rows scanned (RPC) and set to be returned to client. Any in between<a name="line.337"></a>
-<span class="sourceLineNo">338</span>   * cleanup can be done here. Nothing to be done for MutableCellSetSegmentScanner.<a name="line.338"></a>
-<span class="sourceLineNo">339</span>   */<a name="line.339"></a>
-<span class="sourceLineNo">340</span>  @Override<a name="line.340"></a>
-<span class="sourceLineNo">341</span>  public void shipped() throws IOException {<a name="line.341"></a>
-<span class="sourceLineNo">342</span>    // do nothing<a name="line.342"></a>
-<span class="sourceLineNo">343</span>  }<a name="line.343"></a>
-<span class="sourceLineNo">344</span><a name="line.344"></a>
-<span class="sourceLineNo">345</span>  //debug method<a name="line.345"></a>
-<span class="sourceLineNo">346</span>  @Override<a name="line.346"></a>
-<span class="sourceLineNo">347</span>  public String toString() {<a name="line.347"></a>
-<span class="sourceLineNo">348</span>    String res = "Store segment scanner of type "+this.getClass().getName()+"; ";<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    res += "Scanner order " + getScannerOrder() + "; ";<a name="line.349"></a>
-<span class="sourceLineNo">350</span>    res += getSegment().toString();<a name="line.350"></a>
-<span class="sourceLineNo">351</span>    return res;<a name="line.351"></a>
-<span class="sourceLineNo">352</span>  }<a name="line.352"></a>
-<span class="sourceLineNo">353</span><a name="line.353"></a>
-<span class="sourceLineNo">354</span>  /********************* Private Methods **********************/<a name="line.354"></a>
-<span class="sourceLineNo">355</span><a name="line.355"></a>
-<span class="sourceLineNo">356</span>  private Segment getSegment(){<a name="line.356"></a>
-<span class="sourceLineNo">357</span>    return segment;<a name="line.357"></a>
-<span class="sourceLineNo">358</span>  }<a name="line.358"></a>
-<span class="sourceLineNo">359</span><a name="line.359"></a>
-<span class="sourceLineNo">360</span>  /**<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   * Private internal method for iterating over the segment,<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * skipping the cells with irrelevant MVCC<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   */<a name="line.363"></a>
-<span class="sourceLineNo">364</span>  protected void updateCurrent() {<a name="line.364"></a>
-<span class="sourceLineNo">365</span>    Cell startKV = current;<a name="line.365"></a>
-<span class="sourceLineNo">366</span>    Cell next = null;<a name="line.366"></a>
-<span class="sourceLineNo">367</span><a name="line.367"></a>
-<span class="sourceLineNo">368</span>    try {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>      while (iter.hasNext()) {<a name="line.369"></a>
-<span class="sourceLineNo">370</span>        next = iter.next();<a name="line.370"></a>
-<span class="sourceLineNo">371</span>        if (next.getSequenceId() &lt;= this.readPoint) {<a name="line.371"></a>
-<span class="sourceLineNo">372</span>          current = next;<a name="line.372"></a>
-<span class="sourceLineNo">373</span>          return;// skip irrelevant versions<a name="line.373"></a>
-<span class="sourceLineNo">374</span>        }<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        if (stopSkippingKVsIfNextRow &amp;&amp;   // for backwardSeek() stay in the<a name="line.375"></a>
-<span class="sourceLineNo">376</span>            startKV != null &amp;&amp;        // boundaries of a single row<a name="line.376"></a>
-<span class="sourceLineNo">377</span>            segment.compareRows(next, startKV) &gt; 0) {<a name="line.377"></a>
-<span class="sourceLineNo">378</span>          current = null;<a name="line.378"></a>
-<span class="sourceLineNo">379</span>          return;<a name="line.379"></a>
-<span class="sourceLineNo">380</span>        }<a name="line.380"></a>
-<span class="sourceLineNo">381</span>      } // end of while<a name="line.381"></a>
-<span class="sourceLineNo">382</span><a name="line.382"></a>
-<span class="sourceLineNo">383</span>      current = null; // nothing found<a name="line.383"></a>
-<span class="sourceLineNo">384</span>    } finally {<a name="line.384"></a>
-<span class="sourceLineNo">385</span>      if (next != null) {<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        // in all cases, remember the last KV we iterated to, needed for reseek()<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        last = next;<a name="line.387"></a>
-<span class="sourceLineNo">388</span>      }<a name="line.388"></a>
-<span class="sourceLineNo">389</span>    }<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  }<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>  /**<a name="line.392"></a>
-<span class="sourceLineNo">393</span>   * Private internal method that returns the higher of the two key values, or null<a name="line.393"></a>
-<span class="sourceLineNo">394</span>   * if they are both null<a name="line.394"></a>
-<span class="sourceLineNo">395</span>   */<a name="line.395"></a>
-<span class="sourceLineNo">396</span>  private Cell getHighest(Cell first, Cell second) {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>    if (first == null &amp;&amp; second == null) {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>      return null;<a name="line.398"></a>
-<span class="sourceLineNo">399</span>    }<a name="line.399"></a>
-<span class="sourceLineNo">400</span>    if (first != null &amp;&amp; second != null) {<a name="line.400"></a>
-<span class="sourceLineNo">401</span>      int compare = segment.compare(first, second);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>      return (compare &gt; 0 ? first : second);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>    }<a name="line.403"></a>
-<span class="sourceLineNo">404</span>    return (first != null ? first : second);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>  }<a name="line.405"></a>
-<span class="sourceLineNo">406</span>}<a name="line.406"></a>
+<span class="sourceLineNo">336</span>  /********************* Private Methods **********************/<a name="line.336"></a>
+<span class="sourceLineNo">337</span><a name="line.337"></a>
+<span class="sourceLineNo">338</span>  private Segment getSegment(){<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    return segment;<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  /**<a name="line.342"></a>
+<span class="sourceLineNo">343</span>   * Private internal method for iterating over the segment,<a name="line.343"></a>
+<span class="sourceLineNo">344</span>   * skipping the cells with irrelevant MVCC<a name="line.344"></a>
+<span class="sourceLineNo">345</span>   */<a name="line.345"></a>
+<span class="sourceLineNo">346</span>  protected void updateCurrent() {<a name="line.346"></a>
+<span class="sourceLineNo">347</span>    Cell startKV = current;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>    Cell next = null;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>    try {<a name="line.350"></a>
+<span class="sourceLineNo">351</span>      while (iter.hasNext()) {<a name="line.351"></a>
+<span class="sourceLineNo">352</span>        next = iter.next();<a name="line.352"></a>
+<span class="sourceLineNo">353</span>        if (next.getSequenceId() &lt;= this.readPoint) {<a name="line.353"></a>
+<span class="sourceLineNo">354</span>          current = next;<a name="line.354"></a>
+<span class="sourceLineNo">355</span>          return;// skip irrelevant versions<a name="line.355"></a>
+<span class="sourceLineNo">356</span>        }<a name="line.356"></a>
+<span class="sourceLineNo">357</span>        if (stopSkippingKVsIfNextRow &amp;&amp;   // for backwardSeek() stay in the<a name="line.357"></a>
+<span class="sourceLineNo">358</span>            startKV != null &amp;&amp;        // boundaries of a single row<a name="line.358"></a>
+<span class="sourceLineNo">359</span>            segment.compareRows(next, startKV) &gt; 0) {<a name="line.359"></a>
+<span class="sourceLineNo">360</span>          current = null;<a name="line.360"></a>
+<span class="sourceLineNo">361</span>          return;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>        }<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      } // end of while<a name="line.363"></a>
+<span class="sourceLineNo">364</span><a name="line.364"></a>
+<span class="sourceLineNo">365</span>      current = null; // nothing found<a name="line.365"></a>
+<span class="sourceLineNo">366</span>    } finally {<a name="line.366"></a>
+<span class="sourceLineNo">367</span>      if (next != null) {<a name="line.367"></a>
+<span class="sourceLineNo">368</span>        // in all cases, remember the last KV we iterated to, needed for reseek()<a name="line.368"></a>
+<span class="sourceLineNo">369</span>        last = next;<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      }<a name="line.370"></a>
+<span class="sourceLineNo">371</span>    }<a name="line.371"></a>
+<span class="sourceLineNo">372</span>  }<a name="line.372"></a>
+<span class="sourceLineNo">373</span><a name="line.373"></a>
+<span class="sourceLineNo">374</span>  /**<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   * Private internal method that returns the higher of the two key values, or null<a name="line.375"></a>
+<span class="sourceLineNo">376</span>   * if they are both null<a name="line.376"></a>
+<span class="sourceLineNo">377</span>   */<a name="line.377"></a>
+<span class="sourceLineNo">378</span>  private Cell getHighest(Cell first, Cell second) {<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    if (first == null &amp;&amp; second == null) {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>      return null;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    }<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    if (first != null &amp;&amp; second != null) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      int compare = segment.compare(first, second);<a name="line.383"></a>
+<span class="sourceLineNo">384</span>      return (compare &gt; 0 ? first : second);<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    }<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    return (first != null ? first : second);<a name="line.386"></a>
+<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
+<span class="sourceLineNo">388</span>}<a name="line.388"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index 4f63e3a..ef16ae7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -1054,108 +1054,101 @@
 <span class="sourceLineNo">1046</span>    return false;<a name="line.1046"></a>
 <span class="sourceLineNo">1047</span>  }<a name="line.1047"></a>
 <span class="sourceLineNo">1048</span><a name="line.1048"></a>
-<span class="sourceLineNo">1049</span>  /**<a name="line.1049"></a>
-<span class="sourceLineNo">1050</span>   * @see KeyValueScanner#getScannerOrder()<a name="line.1050"></a>
-<span class="sourceLineNo">1051</span>   */<a name="line.1051"></a>
-<span class="sourceLineNo">1052</span>  @Override<a name="line.1052"></a>
-<span class="sourceLineNo">1053</span>  public long getScannerOrder() {<a name="line.1053"></a>
-<span class="sourceLineNo">1054</span>    return 0;<a name="line.1054"></a>
-<span class="sourceLineNo">1055</span>  }<a name="line.1055"></a>
-<span class="sourceLineNo">1056</span><a name="line.1056"></a>
-<span class="sourceLineNo">1057</span>  /**<a name="line.1057"></a>
-<span class="sourceLineNo">1058</span>   * Seek storefiles in parallel to optimize IO latency as much as possible<a name="line.1058"></a>
-<span class="sourceLineNo">1059</span>   * @param scanners the list {@link KeyValueScanner}s to be read from<a name="line.1059"></a>
-<span class="sourceLineNo">1060</span>   * @param kv the KeyValue on which the operation is being requested<a name="line.1060"></a>
-<span class="sourceLineNo">1061</span>   * @throws IOException<a name="line.1061"></a>
-<span class="sourceLineNo">1062</span>   */<a name="line.1062"></a>
-<span class="sourceLineNo">1063</span>  private void parallelSeek(final List&lt;? extends KeyValueScanner&gt;<a name="line.1063"></a>
-<span class="sourceLineNo">1064</span>      scanners, final Cell kv) throws IOException {<a name="line.1064"></a>
-<span class="sourceLineNo">1065</span>    if (scanners.isEmpty()) return;<a name="line.1065"></a>
-<span class="sourceLineNo">1066</span>    int storeFileScannerCount = scanners.size();<a name="line.1066"></a>
-<span class="sourceLineNo">1067</span>    CountDownLatch latch = new CountDownLatch(storeFileScannerCount);<a name="line.1067"></a>
-<span class="sourceLineNo">1068</span>    List&lt;ParallelSeekHandler&gt; handlers = new ArrayList&lt;&gt;(storeFileScannerCount);<a name="line.1068"></a>
-<span class="sourceLineNo">1069</span>    for (KeyValueScanner scanner : scanners) {<a name="line.1069"></a>
-<span class="sourceLineNo">1070</span>      if (scanner instanceof StoreFileScanner) {<a name="line.1070"></a>
-<span class="sourceLineNo">1071</span>        ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv,<a name="line.1071"></a>
-<span class="sourceLineNo">1072</span>          this.readPt, latch);<a name="line.1072"></a>
-<span class="sourceLineNo">1073</span>        executor.submit(seekHandler);<a name="line.1073"></a>
-<span class="sourceLineNo">1074</span>        handlers.add(seekHandler);<a name="line.1074"></a>
-<span class="sourceLineNo">1075</span>      } else {<a name="line.1075"></a>
-<span class="sourceLineNo">1076</span>        scanner.seek(kv);<a name="line.1076"></a>
-<span class="sourceLineNo">1077</span>        latch.countDown();<a name="line.1077"></a>
-<span class="sourceLineNo">1078</span>      }<a name="line.1078"></a>
-<span class="sourceLineNo">1079</span>    }<a name="line.1079"></a>
-<span class="sourceLineNo">1080</span><a name="line.1080"></a>
-<span class="sourceLineNo">1081</span>    try {<a name="line.1081"></a>
-<span class="sourceLineNo">1082</span>      latch.await();<a name="line.1082"></a>
-<span class="sourceLineNo">1083</span>    } catch (InterruptedException ie) {<a name="line.1083"></a>
-<span class="sourceLineNo">1084</span>      throw (InterruptedIOException)new InterruptedIOException().initCause(ie);<a name="line.1084"></a>
-<span class="sourceLineNo">1085</span>    }<a name="line.1085"></a>
+<span class="sourceLineNo">1049</span><a name="line.1049"></a>
+<span class="sourceLineNo">1050</span>  /**<a name="line.1050"></a>
+<span class="sourceLineNo">1051</span>   * Seek storefiles in parallel to optimize IO latency as much as possible<a name="line.1051"></a>
+<span class="sourceLineNo">1052</span>   * @param scanners the list {@link KeyValueScanner}s to be read from<a name="line.1052"></a>
+<span class="sourceLineNo">1053</span>   * @param kv the KeyValue on which the operation is being requested<a name="line.1053"></a>
+<span class="sourceLineNo">1054</span>   * @throws IOException<a name="line.1054"></a>
+<span class="sourceLineNo">1055</span>   */<a name="line.1055"></a>
+<span class="sourceLineNo">1056</span>  private void parallelSeek(final List&lt;? extends KeyValueScanner&gt;<a name="line.1056"></a>
+<span class="sourceLineNo">1057</span>      scanners, final Cell kv) throws IOException {<a name="line.1057"></a>
+<span class="sourceLineNo">1058</span>    if (scanners.isEmpty()) return;<a name="line.1058"></a>
+<span class="sourceLineNo">1059</span>    int storeFileScannerCount = scanners.size();<a name="line.1059"></a>
+<span class="sourceLineNo">1060</span>    CountDownLatch latch = new CountDownLatch(storeFileScannerCount);<a name="line.1060"></a>
+<span class="sourceLineNo">1061</span>    List&lt;ParallelSeekHandler&gt; handlers = new ArrayList&lt;&gt;(storeFileScannerCount);<a name="line.1061"></a>
+<span class="sourceLineNo">1062</span>    for (KeyValueScanner scanner : scanners) {<a name="line.1062"></a>
+<span class="sourceLineNo">1063</span>      if (scanner instanceof StoreFileScanner) {<a name="line.1063"></a>
+<span class="sourceLineNo">1064</span>        ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv,<a name="line.1064"></a>
+<span class="sourceLineNo">1065</span>          this.readPt, latch);<a name="line.1065"></a>
+<span class="sourceLineNo">1066</span>        executor.submit(seekHandler);<a name="line.1066"></a>
+<span class="sourceLineNo">1067</span>        handlers.add(seekHandler);<a name="line.1067"></a>
+<span class="sourceLineNo">1068</span>      } else {<a name="line.1068"></a>
+<span class="sourceLineNo">1069</span>        scanner.seek(kv);<a name="line.1069"></a>
+<span class="sourceLineNo">1070</span>        latch.countDown();<a name="line.1070"></a>
+<span class="sourceLineNo">1071</span>      }<a name="line.1071"></a>
+<span class="sourceLineNo">1072</span>    }<a name="line.1072"></a>
+<span class="sourceLineNo">1073</span><a name="line.1073"></a>
+<span class="sourceLineNo">1074</span>    try {<a name="line.1074"></a>
+<span class="sourceLineNo">1075</span>      latch.await();<a name="line.1075"></a>
+<span class="sourceLineNo">1076</span>    } catch (InterruptedException ie) {<a name="line.1076"></a>
+<span class="sourceLineNo">1077</span>      throw (InterruptedIOException)new InterruptedIOException().initCause(ie);<a name="line.1077"></a>
+<span class="sourceLineNo">1078</span>    }<a name="line.1078"></a>
+<span class="sourceLineNo">1079</span><a name="line.1079"></a>
+<span class="sourceLineNo">1080</span>    for (ParallelSeekHandler handler : handlers) {<a name="line.1080"></a>
+<span class="sourceLineNo">1081</span>      if (handler.getErr() != null) {<a name="line.1081"></a>
+<span class="sourceLineNo">1082</span>        throw new IOException(handler.getErr());<a name="line.1082"></a>
+<span class="sourceLineNo">1083</span>      }<a name="line.1083"></a>
+<span class="sourceLineNo">1084</span>    }<a name="line.1084"></a>
+<span class="sourceLineNo">1085</span>  }<a name="line.1085"></a>
 <span class="sourceLineNo">1086</span><a name="line.1086"></a>
-<span class="sourceLineNo">1087</span>    for (ParallelSeekHandler handler : handlers) {<a name="line.1087"></a>
-<span class="sourceLineNo">1088</span>      if (handler.getErr() != null) {<a name="line.1088"></a>
-<span class="sourceLineNo">1089</span>        throw new IOException(handler.getErr());<a name="line.1089"></a>
-<span class="sourceLineNo">1090</span>      }<a name="line.1090"></a>
-<span class="sourceLineNo">1091</span>    }<a name="line.1091"></a>
-<span class="sourceLineNo">1092</span>  }<a name="line.1092"></a>
-<span class="sourceLineNo">1093</span><a name="line.1093"></a>
-<span class="sourceLineNo">1094</span>  /**<a name="line.1094"></a>
-<span class="sourceLineNo">1095</span>   * Used in testing.<a name="line.1095"></a>
-<span class="sourceLineNo">1096</span>   * @return all scanners in no particular order<a name="line.1096"></a>
-<span class="sourceLineNo">1097</span>   */<a name="line.1097"></a>
-<span class="sourceLineNo">1098</span>  @VisibleForTesting<a name="line.1098"></a>
-<span class="sourceLineNo">1099</span>  List&lt;KeyValueScanner&gt; getAllScannersForTesting() {<a name="line.1099"></a>
-<span class="sourceLineNo">1100</span>    List&lt;KeyValueScanner&gt; allScanners = new ArrayList&lt;&gt;();<a name="line.1100"></a>
-<span class="sourceLineNo">1101</span>    KeyValueScanner current = heap.getCurrentForTesting();<a name="line.1101"></a>
-<span class="sourceLineNo">1102</span>    if (current != null)<a name="line.1102"></a>
-<span class="sourceLineNo">1103</span>      allScanners.add(current);<a name="line.1103"></a>
-<span class="sourceLineNo">1104</span>    for (KeyValueScanner scanner : heap.getHeap())<a name="line.1104"></a>
-<span class="sourceLineNo">1105</span>      allScanners.add(scanner);<a name="line.1105"></a>
-<span class="sourceLineNo">1106</span>    return allScanners;<a name="line.1106"></a>
-<span class="sourceLineNo">1107</span>  }<a name="line.1107"></a>
-<span class="sourceLineNo">1108</span><a name="line.1108"></a>
-<span class="sourceLineNo">1109</span>  static void enableLazySeekGlobally(boolean enable) {<a name="line.1109"></a>
-<span class="sourceLineNo">1110</span>    lazySeekEnabledGlobally = enable;<a name="line.1110"></a>
+<span class="sourceLineNo">1087</span>  /**<a name="line.1087"></a>
+<span class="sourceLineNo">1088</span>   * Used in testing.<a name="line.1088"></a>
+<span class="sourceLineNo">1089</span>   * @return all scanners in no particular order<a name="line.1089"></a>
+<span class="sourceLineNo">1090</span>   */<a name="line.1090"></a>
+<span class="sourceLineNo">1091</span>  @VisibleForTesting<a name="line.1091"></a>
+<span class="sourceLineNo">1092</span>  List&lt;KeyValueScanner&gt; getAllScannersForTesting() {<a name="line.1092"></a>
+<span class="sourceLineNo">1093</span>    List&lt;KeyValueScanner&gt; allScanners = new ArrayList&lt;&gt;();<a name="line.1093"></a>
+<span class="sourceLineNo">1094</span>    KeyValueScanner current = heap.getCurrentForTesting();<a name="line.1094"></a>
+<span class="sourceLineNo">1095</span>    if (current != null)<a name="line.1095"></a>
+<span class="sourceLineNo">1096</span>      allScanners.add(current);<a name="line.1096"></a>
+<span class="sourceLineNo">1097</span>    for (KeyValueScanner scanner : heap.getHeap())<a name="line.1097"></a>
+<span class="sourceLineNo">1098</span>      allScanners.add(scanner);<a name="line.1098"></a>
+<span class="sourceLineNo">1099</span>    return allScanners;<a name="line.1099"></a>
+<span class="sourceLineNo">1100</span>  }<a name="line.1100"></a>
+<span class="sourceLineNo">1101</span><a name="line.1101"></a>
+<span class="sourceLineNo">1102</span>  static void enableLazySeekGlobally(boolean enable) {<a name="line.1102"></a>
+<span class="sourceLineNo">1103</span>    lazySeekEnabledGlobally = enable;<a name="line.1103"></a>
+<span class="sourceLineNo">1104</span>  }<a name="line.1104"></a>
+<span class="sourceLineNo">1105</span><a name="line.1105"></a>
+<span class="sourceLineNo">1106</span>  /**<a name="line.1106"></a>
+<span class="sourceLineNo">1107</span>   * @return The estimated number of KVs seen by this scanner (includes some skipped KVs).<a name="line.1107"></a>
+<span class="sourceLineNo">1108</span>   */<a name="line.1108"></a>
+<span class="sourceLineNo">1109</span>  public long getEstimatedNumberOfKvsScanned() {<a name="line.1109"></a>
+<span class="sourceLineNo">1110</span>    return this.kvsScanned;<a name="line.1110"></a>
 <span class="sourceLineNo">1111</span>  }<a name="line.1111"></a>
 <span class="sourceLineNo">1112</span><a name="line.1112"></a>
-<span class="sourceLineNo">1113</span>  /**<a name="line.1113"></a>
-<span class="sourceLineNo">1114</span>   * @return The estimated number of KVs seen by this scanner (includes some skipped KVs).<a name="line.1114"></a>
-<span class="sourceLineNo">1115</span>   */<a name="line.1115"></a>
-<span class="sourceLineNo">1116</span>  public long getEstimatedNumberOfKvsScanned() {<a name="line.1116"></a>
-<span class="sourceLineNo">1117</span>    return this.kvsScanned;<a name="line.1117"></a>
-<span class="sourceLineNo">1118</span>  }<a name="line.1118"></a>
-<span class="sourceLineNo">1119</span><a name="line.1119"></a>
-<span class="sourceLineNo">1120</span>  @Override<a name="line.1120"></a>
-<span class="sourceLineNo">1121</span>  public Cell getNextIndexedKey() {<a name="line.1121"></a>
-<span class="sourceLineNo">1122</span>    return this.heap.getNextIndexedKey();<a name="line.1122"></a>
-<span class="sourceLineNo">1123</span>  }<a name="line.1123"></a>
-<span class="sourceLineNo">1124</span><a name="line.1124"></a>
-<span class="sourceLineNo">1125</span>  @Override<a name="line.1125"></a>
-<span class="sourceLineNo">1126</span>  public void shipped() throws IOException {<a name="line.1126"></a>
-<span class="sourceLineNo">1127</span>    if (prevCell != null) {<a name="line.1127"></a>
-<span class="sourceLineNo">1128</span>      // Do the copy here so that in case the prevCell ref is pointing to the previous<a name="line.1128"></a>
-<span class="sourceLineNo">1129</span>      // blocks we can safely release those blocks.<a name="line.1129"></a>
-<span class="sourceLineNo">1130</span>      // This applies to blocks that are got from Bucket cache, L1 cache and the blocks<a name="line.1130"></a>
-<span class="sourceLineNo">1131</span>      // fetched from HDFS. Copying this would ensure that we let go the references to these<a name="line.1131"></a>
-<span class="sourceLineNo">1132</span>      // blocks so that they can be GCed safely(in case of bucket cache)<a name="line.1132"></a>
-<span class="sourceLineNo">1133</span>      prevCell = KeyValueUtil.toNewKeyCell(this.prevCell);<a name="line.1133"></a>
-<span class="sourceLineNo">1134</span>    }<a name="line.1134"></a>
-<span class="sourceLineNo">1135</span>    matcher.beforeShipped();<a name="line.1135"></a>
-<span class="sourceLineNo">1136</span>    // There wont be further fetch of Cells from these scanners. Just close.<a name="line.1136"></a>
-<span class="sourceLineNo">1137</span>    clearAndClose(scannersForDelayedClose);<a name="line.1137"></a>
-<span class="sourceLineNo">1138</span>    if (this.heap != null) {<a name="line.1138"></a>
-<span class="sourceLineNo">1139</span>      this.heap.shipped();<a name="line.1139"></a>
-<span class="sourceLineNo">1140</span>      // When switching from pread to stream, we will open a new scanner for each store file, but<a name="line.1140"></a>
-<span class="sourceLineNo">1141</span>      // the old scanner may still track the HFileBlocks we have scanned but not sent back to client<a name="line.1141"></a>
-<span class="sourceLineNo">1142</span>      // yet. If we close the scanner immediately then the HFileBlocks may be messed up by others<a name="line.1142"></a>
-<span class="sourceLineNo">1143</span>      // before we serialize and send it back to client. The HFileBlocks will be released in shipped<a name="line.1143"></a>
-<span class="sourceLineNo">1144</span>      // method, so we here will also open new scanners and close old scanners in shipped method.<a name="line.1144"></a>
-<span class="sourceLineNo">1145</span>      // See HBASE-18055 for more details.<a name="line.1145"></a>
-<span class="sourceLineNo">1146</span>      trySwitchToStreamRead();<a name="line.1146"></a>
-<span class="sourceLineNo">1147</span>    }<a name="line.1147"></a>
-<span class="sourceLineNo">1148</span>  }<a name="line.1148"></a>
-<span class="sourceLineNo">1149</span>}<a name="line.1149"></a>
-<span class="sourceLineNo">1150</span><a name="line.1150"></a>
+<span class="sourceLineNo">1113</span>  @Override<a name="line.1113"></a>
+<span class="sourceLineNo">1114</span>  public Cell getNextIndexedKey() {<a name="line.1114"></a>
+<span class="sourceLineNo">1115</span>    return this.heap.getNextIndexedKey();<a name="line.1115"></a>
+<span class="sourceLineNo">1116</span>  }<a name="line.1116"></a>
+<span class="sourceLineNo">1117</span><a name="line.1117"></a>
+<span class="sourceLineNo">1118</span>  @Override<a name="line.1118"></a>
+<span class="sourceLineNo">1119</span>  public void shipped() throws IOException {<a name="line.1119"></a>
+<span class="sourceLineNo">1120</span>    if (prevCell != null) {<a name="line.1120"></a>
+<span class="sourceLineNo">1121</span>      // Do the copy here so that in case the prevCell ref is pointing to the previous<a name="line.1121"></a>
+<span class="sourceLineNo">1122</span>      // blocks we can safely release those blocks.<a name="line.1122"></a>
+<span class="sourceLineNo">1123</span>      // This applies to blocks that are got from Bucket cache, L1 cache and the blocks<a name="line.1123"></a>
+<span class="sourceLineNo">1124</span>      // fetched from HDFS. Copying this would ensure that we let go the references to these<a name="line.1124"></a>
+<span class="sourceLineNo">1125</span>      // blocks so that they can be GCed safely(in case of bucket cache)<a name="line.1125"></a>
+<span class="sourceLineNo">1126</span>      prevCell = KeyValueUtil.toNewKeyCell(this.prevCell);<a name="line.1126"></a>
+<span class="sourceLineNo">1127</span>    }<a name="line.1127"></a>
+<span class="sourceLineNo">1128</span>    matcher.beforeShipped();<a name="line.1128"></a>
+<span class="sourceLineNo">1129</span>    // There wont be further fetch of Cells from these scanners. Just close.<a name="line.1129"></a>
+<span class="sourceLineNo">1130</span>    clearAndClose(scannersForDelayedClose);<a name="line.1130"></a>
+<span class="sourceLineNo">1131</span>    if (this.heap != null) {<a name="line.1131"></a>
+<span class="sourceLineNo">1132</span>      this.heap.shipped();<a name="line.1132"></a>
+<span class="sourceLineNo">1133</span>      // When switching from pread to stream, we will open a new scanner for each store file, but<a name="line.1133"></a>
+<span class="sourceLineNo">1134</span>      // the old scanner may still track the HFileBlocks we have scanned but not sent back to client<a name="line.1134"></a>
+<span class="sourceLineNo">1135</span>      // yet. If we close the scanner immediately then the HFileBlocks may be messed up by others<a name="line.1135"></a>
+<span class="sourceLineNo">1136</span>      // before we serialize and send it back to client. The HFileBlocks will be released in shipped<a name="line.1136"></a>
+<span class="sourceLineNo">1137</span>      // method, so we here will also open new scanners and close old scanners in shipped method.<a name="line.1137"></a>
+<span class="sourceLineNo">1138</span>      // See HBASE-18055 for more details.<a name="line.1138"></a>
+<span class="sourceLineNo">1139</span>      trySwitchToStreamRead();<a name="line.1139"></a>
+<span class="sourceLineNo">1140</span>    }<a name="line.1140"></a>
+<span class="sourceLineNo">1141</span>  }<a name="line.1141"></a>
+<span class="sourceLineNo">1142</span>}<a name="line.1142"></a>
+<span class="sourceLineNo">1143</span><a name="line.1143"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
index 6d91b4d..c961ffa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
@@ -124,19 +124,12 @@
 <span class="sourceLineNo">116</span>    return false;<a name="line.116"></a>
 <span class="sourceLineNo">117</span>  }<a name="line.117"></a>
 <span class="sourceLineNo">118</span><a name="line.118"></a>
-<span class="sourceLineNo">119</span>  /**<a name="line.119"></a>
-<span class="sourceLineNo">120</span>   * @see org.apache.hadoop.hbase.regionserver.KeyValueScanner#getScannerOrder()<a name="line.120"></a>
-<span class="sourceLineNo">121</span>   */<a name="line.121"></a>
-<span class="sourceLineNo">122</span>  @Override<a name="line.122"></a>
-<span class="sourceLineNo">123</span>  public long getScannerOrder() {<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    return 0;<a name="line.124"></a>
-<span class="sourceLineNo">125</span>  }<a name="line.125"></a>
-<span class="sourceLineNo">126</span><a name="line.126"></a>
-<span class="sourceLineNo">127</span>  @Override<a name="line.127"></a>
-<span class="sourceLineNo">128</span>  public void close() {<a name="line.128"></a>
-<span class="sourceLineNo">129</span>    // do nothing<a name="line.129"></a>
-<span class="sourceLineNo">130</span>  }<a name="line.130"></a>
-<span class="sourceLineNo">131</span>}<a name="line.131"></a>
+<span class="sourceLineNo">119</span><a name="line.119"></a>
+<span class="sourceLineNo">120</span>  @Override<a name="line.120"></a>
+<span class="sourceLineNo">121</span>  public void close() {<a name="line.121"></a>
+<span class="sourceLineNo">122</span>    // do nothing<a name="line.122"></a>
+<span class="sourceLineNo">123</span>  }<a name="line.123"></a>
+<span class="sourceLineNo">124</span>}<a name="line.124"></a>
 
 
 


[40/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/checkstyle-aggregate.html
----------------------------------------------------------------------
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 88e816b..86842ab 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Checkstyle Results</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.5-HBASE.min.css" />
@@ -284,7 +284,7 @@
 <td>3604</td>
 <td>0</td>
 <td>0</td>
-<td>15865</td></tr></table></div>
+<td>15864</td></tr></table></div>
 <div class="section">
 <h2><a name="Files"></a>Files</h2>
 <table border="0" class="table table-striped">
@@ -6897,7 +6897,7 @@
 <td><a href="#org.apache.hadoop.hbase.regionserver.SegmentScanner.java">org/apache/hadoop/hbase/regionserver/SegmentScanner.java</a></td>
 <td>0</td>
 <td>0</td>
-<td>3</td></tr>
+<td>2</td></tr>
 <tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ServerNonceManager.java">org/apache/hadoop/hbase/regionserver/ServerNonceManager.java</a></td>
 <td>0</td>
@@ -9497,7 +9497,7 @@
 <td><a href="#org.apache.hadoop.hbase.util.HBaseFsck.java">org/apache/hadoop/hbase/util/HBaseFsck.java</a></td>
 <td>0</td>
 <td>0</td>
-<td>97</td></tr>
+<td>99</td></tr>
 <tr class="a">
 <td><a href="#org.apache.hadoop.hbase.util.HBaseFsckRepair.java">org/apache/hadoop/hbase/util/HBaseFsckRepair.java</a></td>
 <td>0</td>
@@ -9937,7 +9937,7 @@
 <td><a href="#org.apache.hadoop.hbase.util.VersionInfo.java">org/apache/hadoop/hbase/util/VersionInfo.java</a></td>
 <td>0</td>
 <td>0</td>
-<td>3</td></tr>
+<td>1</td></tr>
 <tr class="a">
 <td><a href="#org.apache.hadoop.hbase.util.Writables.java">org/apache/hadoop/hbase/util/Writables.java</a></td>
 <td>0</td>
@@ -10297,7 +10297,7 @@
 <li>sortStaticImportsAlphabetically: <tt>&quot;true&quot;</tt></li>
 <li>groups: <tt>&quot;*,org.apache.hbase.thirdparty,org.apache.hadoop.hbase.shaded&quot;</tt></li>
 <li>option: <tt>&quot;top&quot;</tt></li></ul></td>
-<td>1222</td>
+<td>1223</td>
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td></tr>
 <tr class="a">
 <td></td>
@@ -10327,7 +10327,7 @@
 <td><a class="externalLink" href="http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation">JavadocTagContinuationIndentation</a>
 <ul>
 <li>offset: <tt>&quot;2&quot;</tt></li></ul></td>
-<td>798</td>
+<td>797</td>
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td></tr>
 <tr class="a">
 <td></td>
@@ -10337,7 +10337,7 @@
 <tr class="b">
 <td>misc</td>
 <td><a class="externalLink" href="http://checkstyle.sourceforge.net/config_misc.html#ArrayTypeStyle">ArrayTypeStyle</a></td>
-<td>151</td>
+<td>149</td>
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td></tr>
 <tr class="a">
 <td></td>
@@ -10350,7 +10350,7 @@
 <ul>
 <li>max: <tt>&quot;100&quot;</tt></li>
 <li>ignorePattern: <tt>&quot;^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated&quot;</tt></li></ul></td>
-<td>1615</td>
+<td>1616</td>
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td></tr>
 <tr class="a">
 <td></td>
@@ -78350,13 +78350,13 @@
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
-<td>153</td></tr>
+<td>151</td></tr>
 <tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 102).</td>
-<td>299</td></tr></table></div>
+<td>297</td></tr></table></div>
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter.java">org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java</h3>
 <table border="0" class="table table-striped">
@@ -78980,25 +78980,25 @@
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def modifier' have incorrect indentation level 3, expected level should be 2.</td>
-<td>374</td></tr>
+<td>370</td></tr>
 <tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def' child have incorrect indentation level 5, expected level should be 4.</td>
-<td>376</td></tr>
+<td>372</td></tr>
 <tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def rcurly' have incorrect indentation level 3, expected level should be 2.</td>
-<td>377</td></tr>
+<td>373</td></tr>
 <tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
-<td>514</td></tr></table></div>
+<td>510</td></tr></table></div>
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.CompactionPipeline.java">org/apache/hadoop/hbase/regionserver/CompactionPipeline.java</h3>
 <table border="0" class="table table-striped">
@@ -79091,7 +79091,7 @@
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
-<td>142</td></tr></table></div>
+<td>134</td></tr></table></div>
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java</h3>
 <table border="0" class="table table-striped">
@@ -79427,7 +79427,7 @@
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
-<td>144</td></tr></table></div>
+<td>143</td></tr></table></div>
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.java">org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java</h3>
 <table border="0" class="table table-striped">
@@ -85535,68 +85535,62 @@
 <td>30</td></tr>
 <tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
-<td>javadoc</td>
-<td>JavadocTagContinuationIndentation</td>
-<td>Line continuation have incorrect indentation level, expected level should be 2.</td>
-<td>70</td></tr>
-<tr class="b">
-<td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 112).</td>
-<td>284</td></tr></table></div>
+<td>266</td></tr></table></div>
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.ServerNonceManager.java">org/apache/hadoop/hbase/regionserver/ServerNonceManager.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.util.EnvironmentEdgeManager' import.</td>
 <td>33</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>152</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>157</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>182</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>243</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>246</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>278</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -85605,13 +85599,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.ShipperListener.java">org/apache/hadoop/hbase/regionserver/ShipperListener.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -85620,49 +85614,49 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.ShutdownHook.java">org/apache/hadoop/hbase/regionserver/ShutdownHook.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.conf.Configuration' import.</td>
 <td>29</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>HideUtilityClassConstructor</td>
 <td>Utility classes should not have a public or default constructor.</td>
 <td>41</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>76</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
 <td>81</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'this' have incorrect indentation level 14, expected level should be 16.</td>
 <td>127</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>256</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -85671,31 +85665,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory.java">org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.ipc.PriorityFunction' import.</td>
 <td>26</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>annotation</td>
 <td>MissingDeprecated</td>
 <td>Must include both @java.lang.Deprecated annotation and @deprecated Javadoc tag with description.</td>
 <td>35</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def' child have incorrect indentation level 10, expected level should be 4.</td>
 <td>38</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>FileTabCharacter</td>
@@ -85704,19 +85698,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.SplitLogWorker.java">org/apache/hadoop/hbase/regionserver/SplitLogWorker.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.conf.Configuration' import.</td>
 <td>29</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
@@ -85725,13 +85719,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.SplitRequest.java">org/apache/hadoop/hbase/regionserver/SplitRequest.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>MethodParamPad</td>
@@ -85740,25 +85734,25 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy.java">org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
 <td>26</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
 <td>27</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
@@ -85767,31 +85761,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.Store.java">org/apache/hadoop/hbase/regionserver/Store.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
 <td>97</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
 <td>203</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Line continuation have incorrect indentation level, expected level should be 2.</td>
 <td>204</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -85800,19 +85794,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFileComparators.java">org/apache/hadoop/hbase/regionserver/StoreFileComparators.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'java.util.Comparator' import.</td>
 <td>24</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>HideUtilityClassConstructor</td>
@@ -85821,145 +85815,145 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFileInfo.java">org/apache/hadoop/hbase/regionserver/StoreFileInfo.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.io.FSDataInputStreamWrapper' import.</td>
 <td>36</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>124</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>135</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>178</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>179</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>180</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>181</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>182</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>197</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>471</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>485</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>534</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>535</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>537</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>540</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>541</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>542</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>545</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>546</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>547</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>550</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>551</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -85968,19 +85962,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFileManager.java">org/apache/hadoop/hbase/regionserver/StoreFileManager.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>68</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -85989,31 +85983,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFileReader.java">org/apache/hadoop/hbase/regionserver/StoreFileReader.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.KeyValue' import.</td>
 <td>39</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>469</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>489</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -86022,67 +86016,67 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFileScanner.java">org/apache/hadoop/hbase/regionserver/StoreFileScanner.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.client.Scan' import.</td>
 <td>40</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>223</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>251</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>308</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>319</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>320</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>322</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>464</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>513</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -86091,85 +86085,85 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFileWriter.java">org/apache/hadoop/hbase/regionserver/StoreFileWriter.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.KeyValue' import.</td>
 <td>40</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>FinalClass</td>
 <td>Class StoreFileWriter should be declared as final.</td>
 <td>61</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>85</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'ctor def modifier' have incorrect indentation level 4, expected level should be 2.</td>
 <td>90</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'case' child have incorrect indentation level 6, expected level should be 8.</td>
 <td>119</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'block' child have incorrect indentation level 8, expected level should be 10.</td>
 <td>120</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'block' child have incorrect indentation level 8, expected level should be 10.</td>
 <td>121</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'case' child have incorrect indentation level 6, expected level should be 8.</td>
 <td>122</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'block' child have incorrect indentation level 8, expected level should be 10.</td>
 <td>123</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'block' child have incorrect indentation level 8, expected level should be 10.</td>
 <td>124</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'case' child have incorrect indentation level 6, expected level should be 8.</td>
 <td>125</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'block' child have incorrect indentation level 8, expected level should be 10.</td>
 <td>126</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -86178,31 +86172,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFlushContext.java">org/apache/hadoop/hbase/regionserver/StoreFlushContext.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>60</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>61</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>71</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -86211,31 +86205,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreFlusher.java">org/apache/hadoop/hbase/regionserver/StoreFlusher.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>UnusedImports</td>
 <td>Unused import - java.util.OptionalInt.</td>
 <td>25</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>77</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>78</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
@@ -86244,160 +86238,160 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreScanner.java">org/apache/hadoop/hbase/regionserver/StoreScanner.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.KeyValue' import.</td>
 <td>37</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'ctor def' child have incorrect indentation level 5, expected level should be 4.</td>
 <td>182</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'ctor def' child have incorrect indentation level 5, expected level should be 4.</td>
 <td>183</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>225</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>369</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>370</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>373</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>508</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>509</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>MethodLength</td>
 <td>Method length is 189 lines (max allowed is 150).</td>
 <td>512</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>InnerAssignment</td>
 <td>Inner assignments should be avoided.</td>
 <td>692</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>InnerAssignment</td>
 <td>Inner assignments should be avoided.</td>
 <td>799</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>InnerAssignment</td>
 <td>Inner assignments should be avoided.</td>
 <td>820</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>940</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>941</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>942</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>943</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>959</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>961</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
-<td>1061</td></tr>
-<tr class="b">
+<td>1054</td></tr>
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
-<td>1065</td></tr>
-<tr class="a">
+<td>1058</td></tr>
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
-<td>1102</td></tr>
-<tr class="b">
+<td>1095</td></tr>
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'for' construct must use '{}'s.</td>
-<td>1104</td></tr></table></div>
+<td>1097</td></tr></table></div>
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StoreUtils.java">org/apache/hadoop/hbase/regionserver/StoreUtils.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>HideUtilityClassConstructor</td>
@@ -86406,25 +86400,25 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StorefileRefresherChore.java">org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner' import.</td>
 <td>31</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>94</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
@@ -86433,19 +86427,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter.java">org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
 <td>Wrong order for 'org.apache.hadoop.hbase.util.Bytes' import.</td>
 <td>34</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -86454,13 +86448,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StripeStoreConfig.java">org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
@@ -86469,13 +86463,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StripeStoreEngine.java">org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>ImportOrder</td>
@@ -86484,97 +86478,97 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.java">org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>VisibilityModifier</td>
 <td>Variable 'stripeEndRows' must be private and have accessor methods.</td>
 <td>95</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>VisibilityModifier</td>
 <td>Variable 'stripeFiles' must be private and have accessor methods.</td>
 <td>102</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>VisibilityModifier</td>
 <td>Variable 'level0Files' must be private and have accessor methods.</td>
 <td>104</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>VisibilityModifier</td>
 <td>Variable 'allFilesCached' must be private and have accessor methods.</td>
 <td>107</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>512</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>513</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>517</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>587</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>595</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>736</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 107).</td>
 <td>901</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>918</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>922</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>964</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -86583,13 +86577,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.StripeStoreFlusher.java">org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -86598,79 +86592,79 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java">org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>119</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>120</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'&quot; The Universe, and Everything&quot;' have incorrect indentation level 4, expected level should be 6.</td>
 <td>138</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>239</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>MethodParamPad</td>
 <td>'(' is preceded with whitespace.</td>
 <td>249</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>MethodParamPad</td>
 <td>'(' is preceded with whitespace.</td>
 <td>255</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 101).</td>
 <td>346</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 101).</td>
 <td>347</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>531</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>EmptyStatement</td>
 <td>Empty statement.</td>
 <td>532</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'ctor def rcurly' have incorrect indentation level 3, expected level should be 4.</td>
 <td>653</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>design</td>
 <td>FinalClass</td>
@@ -86679,103 +86673,103 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestBlocksRead.java">org/apache/hadoop/hbase/regionserver/TestBlocksRead.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>AvoidStarImport</td>
 <td>Using the '.*' form of import should be avoided - org.junit.*.</td>
 <td>49</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 6, expected level should be one of the following: 4, 58.</td>
 <td>67</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>88</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>89</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>90</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>91</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>92</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>129</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 110).</td>
 <td>199</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def' child have incorrect indentation level 6, expected level should be 4.</td>
 <td>203</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>213</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>219</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>269</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>275</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>376</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
@@ -86784,37 +86778,37 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestBlocksScanned.java">org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method call rparen' have incorrect indentation level 8, expected level should be 4.</td>
 <td>79</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method call rparen' have incorrect indentation level 8, expected level should be 4.</td>
 <td>95</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>114</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>EmptyStatement</td>
 <td>Empty statement.</td>
 <td>114</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
@@ -86823,25 +86817,25 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestBulkLoad.java">org/apache/hadoop/hbase/regionserver/TestBulkLoad.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'object def rcurly' have incorrect indentation level 4, expected level should be one of the following: 12, 14.</td>
 <td>130</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'object def rcurly' have incorrect indentation level 4, expected level should be one of the following: 12, 14.</td>
 <td>156</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
@@ -86850,19 +86844,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.java">org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>MissingSwitchDefault</td>
 <td>switch without &quot;default&quot; clause.</td>
 <td>116</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>LeftCurly</td>
@@ -86871,43 +86865,43 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCellFlatSet.java">org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>65</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>67</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 101).</td>
 <td>89</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 104).</td>
 <td>205</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>285</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
@@ -86916,13 +86910,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCellSkipListSet.java">org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -86931,19 +86925,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestClearRegionBlockCache.java">org/apache/hadoop/hbase/regionserver/TestClearRegionBlockCache.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>175</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>EmptyStatement</td>
@@ -86952,43 +86946,43 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestColumnSeeking.java">org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>150</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>166</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>EmptyStatement</td>
 <td>Empty statement.</td>
 <td>167</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>262</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>278</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>coding</td>
 <td>EmptyStatement</td>
@@ -86997,19 +86991,19 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCompactSplitThread.java">org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>AvoidStarImport</td>
 <td>Using the '.*' form of import should be avoided - org.apache.hadoop.hbase.*.</td>
 <td>26</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>AvoidStarImport</td>
@@ -87018,103 +87012,103 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCompactingMemStore.java">org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 103).</td>
 <td>95</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>129</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>130</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>170</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>190</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 105).</td>
 <td>216</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>278</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>317</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'try' child have incorrect indentation level 5, expected level should be 6.</td>
 <td>340</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>653</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def' child have incorrect indentation level 12, expected level should be 6.</td>
 <td>872</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def rcurly' have incorrect indentation level 8, expected level should be 4.</td>
 <td>873</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def modifier' have incorrect indentation level 6, expected level should be 4.</td>
 <td>874</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def' child have incorrect indentation level 8, expected level should be 6.</td>
 <td>875</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def rcurly' have incorrect indentation level 6, expected level should be 4.</td>
 <td>876</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
@@ -87123,103 +87117,103 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCompactingToCellFlatMapMemStore.java">org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>131</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>165</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>188</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>322</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>333</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>341</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>ParenPad</td>
 <td>'(' is followed by whitespace.</td>
 <td>356</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 103).</td>
 <td>401</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'lambda arguments' have incorrect indentation level 8, expected level should be 6.</td>
 <td>403</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'lambda arguments' have incorrect indentation level 8, expected level should be 6.</td>
 <td>405</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 103).</td>
 <td>421</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'lambda arguments' have incorrect indentation level 8, expected level should be 6.</td>
 <td>423</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'lambda arguments' have incorrect indentation level 8, expected level should be 6.</td>
 <td>425</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 103).</td>
 <td>438</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'lambda arguments' have incorrect indentation level 8, expected level should be 6.</td>
 <td>440</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
@@ -87228,31 +87222,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCompaction.java">org/apache/hadoop/hbase/regionserver/TestCompaction.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>152</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 104).</td>
 <td>198</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>455</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
@@ -87261,49 +87255,49 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCompactionState.java">org/apache/hadoop/hbase/regionserver/TestCompactionState.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>126</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>127</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>128</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>130</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>131</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>194</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -87312,109 +87306,109 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestCompoundBloomFilter.java">org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>81</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 6, expected level should be one of the following: 4, 51.</td>
 <td>82</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 6, expected level should be one of the following: 4, 51.</td>
 <td>83</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>85</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>92</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 8, expected level should be one of the following: 6, 30.</td>
 <td>96</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>105</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 6, expected level should be one of the following: 4, 51.</td>
 <td>106</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>LeftCurly</td>
 <td>'{' at column 10 should have line break after.</td>
 <td>107</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>109</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 6, expected level should be one of the following: 4, 55.</td>
 <td>110</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>LeftCurly</td>
 <td>'{' at column 10 should have line break after.</td>
 <td>111</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'for' construct must use '{}'s.</td>
 <td>117</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>JavadocTagContinuationIndentation</td>
 <td>Javadoc comment at column 33 has parse error. Missed HTML close tag 'number'. Sometimes it means that close tag missed for one of previous tags.</td>
 <td>127</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'for' construct must use '{}'s.</td>
 <td>149</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>330</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
@@ -87423,37 +87417,37 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestDateTieredCompactionPolicy.java">org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 8, expected level should be one of the following: 6, 82.</td>
 <td>86</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 8, expected level should be one of the following: 6, 83.</td>
 <td>194</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'array initialization' child have incorrect indentation level 8, expected level should be one of the following: 6, 82.</td>
 <td>208</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>245</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
@@ -87462,13 +87456,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestDateTieredCompactionPolicyOverflow.java">org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicyOverflow.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
@@ -87477,109 +87471,109 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestDefaultMemStore.java">org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>159</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 103).</td>
 <td>169</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>235</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>236</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>294</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>misc</td>
 <td>ArrayTypeStyle</td>
 <td>Array brackets at illegal position.</td>
 <td>507</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>526</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>565</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>819</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>859</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>902</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>976</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>1008</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>1018</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>whitespace</td>
 <td>MethodParamPad</td>
 <td>'(' should be on the previous line.</td>
 <td>1039</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>1086</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -87588,13 +87582,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestDeleteMobTable.java">org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>imports</td>
 <td>AvoidStarImport</td>
@@ -87603,31 +87597,31 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction.java">org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 123).</td>
 <td>229</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 123).</td>
 <td>231</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>LineLength</td>
 <td>Line is longer than 100 characters (found 102).</td>
 <td>346</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -87636,13 +87630,13 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestFSErrorsExposed.java">org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -87651,55 +87645,55 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestFailedAppendAndSync.java">org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java</h3>
 <table border="0" class="table table-striped">
-<tr class="a">
+<tr class="b">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>sizes</td>
 <td>MethodLength</td>
 <td>Method length is 154 lines (max allowed is 150).</td>
 <td>111</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def' child have incorrect indentation level 10, expected level should be 8.</td>
 <td>134</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'method def rcurly' have incorrect indentation level 10, expected level should be 6.</td>
 <td>161</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>indentation</td>
 <td>Indentation</td>
 <td>'class def rcurly' have incorrect indentation level 6, expected level should be 4.</td>
 <td>162</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>205</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'while' construct must use '{}'s.</td>
 <td>220</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>252</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
@@ -87708,73 +87702,73 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestHMobStore.java">org/apache/hadoop/hbase/regionserver/TestHMobStore.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>121</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>212</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>247</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>291</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>337</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>381</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>471</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>472</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>483</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>484</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
@@ -87783,325 +87777,325 @@
 <div class="section">
 <h3 id="org.apache.hadoop.hbase.regionserver.TestHRegion.java">org/apache/hadoop/hbase/regionserver/TestHRegion.java</h3>
 <table border="0" class="table table-striped">
-<tr class="b">
+<tr class="a">
 <th>Severity</th>
 <th>Category</th>
 <th>Rule</th>
 <th>Message</th>
 <th>Line</th></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>258</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>293</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>javadoc</td>
 <td>NonEmptyAtclauseDescription</td>
 <td>At-clause should have a non-empty description.</td>
 <td>438</td></tr>
-<tr class="b">
+<tr class="a">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>NeedBraces</td>
 <td>'if' construct must use '{}'s.</td>
 <td>1401</td></tr>
-<tr class="a">
+<tr class="b">
 <td><img src="images/icon_error_sml.gif" alt="" />&#160;Error</td>
 <td>blocks</td>
 <td>

<TRUNCATED>

[33/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Segment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Segment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Segment.html
index c2254c2..04631c4 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Segment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Segment.html
@@ -230,10 +230,9 @@
 </tr>
 <tbody>
 <tr class="altColor">
-<td class="colFirst"><code>protected static long</code></td>
-<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">addToScanners</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
+<td class="colFirst"><code>protected static void</code></td>
+<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">addToScanners</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
              long&nbsp;readPt,
-             long&nbsp;order,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
@@ -254,10 +253,9 @@
 </tr>
 <tbody>
 <tr class="altColor">
-<td class="colFirst"><code>static long</code></td>
-<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-long-java.util.List-">addToScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
+<td class="colFirst"><code>static void</code></td>
+<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-java.util.List-">addToScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
              long&nbsp;readPt,
-             long&nbsp;order,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
@@ -303,12 +301,9 @@
 </tr>
 <tr class="rowColor">
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-">SegmentScanner</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
-              long&nbsp;readPoint)</code>&nbsp;</td>
-</tr>
-<tr class="altColor">
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-long-">SegmentScanner</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
-              long&nbsp;readPoint,
-              long&nbsp;scannerOrder)</code>&nbsp;</td>
+              long&nbsp;readPoint)</code>
+<div class="block">Scanners are ordered from 0 (oldest) to newest in increasing order.</div>
+</td>
 </tr>
 </tbody>
 </table>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 8a9d4c2..284abae 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -705,19 +705,19 @@
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScannerContext.LimitScope</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScanType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScanType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">HRegion.FlushResult.Result</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.StepDirection.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">DefaultHeapMemoryTuner.StepDirection</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">CompactingMemStore.IndexType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MemStoreCompactionStrategy.Action</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/BloomType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">BloomType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/FlushType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">FlushType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ChunkCreator.ChunkType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MetricsRegionServerSourceFactoryImpl.FactoryStorage</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScannerContext.NextState</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/BloomType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">BloomType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Region.Operation.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">Region.Operation</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">HRegion.FlushResult.Result</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScanType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScanType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MemStoreCompactionStrategy.Action</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.StepDirection.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">DefaultHeapMemoryTuner.StepDirection</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.Type.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TimeRangeTracker.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">SplitLogWorker.TaskExecutor.Status</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.Type.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TimeRangeTracker.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MetricsRegionServerSourceFactoryImpl.FactoryStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Region.Operation.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">Region.Operation</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ChunkCreator.ChunkType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index b377318..23060c2 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -131,8 +131,8 @@
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
 <li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.DropDeletesInOutput.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">StripeCompactionScanQueryMatcher.DropDeletesInOutput</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">ScanQueryMatcher.MatchCode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.DeleteResult.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">DeleteTracker.DeleteResult</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">ScanQueryMatcher.MatchCode</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index f6fc79b..a4ab1b7 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -238,8 +238,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrResult.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">ProtobufLogReader.WALHdrResult</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.Type.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">RingBufferTruck.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrResult.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">ProtobufLogReader.WALHdrResult</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
index 5efcbb8..732825f 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
@@ -199,8 +199,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.replication.regionserver.<a href="../../../../../../org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html" title="enum in org.apache.hadoop.hbase.replication.regionserver"><span class="typeNameLink">ReplicationSourceShipper.WorkerState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.replication.regionserver.<a href="../../../../../../org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.SourceHolder.html" title="enum in org.apache.hadoop.hbase.replication.regionserver"><span class="typeNameLink">MetricsReplicationSourceFactoryImpl.SourceHolder</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.replication.regionserver.<a href="../../../../../../org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html" title="enum in org.apache.hadoop.hbase.replication.regionserver"><span class="typeNameLink">ReplicationSourceShipper.WorkerState</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html b/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
index 034077c..c20ff47 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
@@ -110,8 +110,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.rest.model.<a href="../../../../../../org/apache/hadoop/hbase/rest/model/ScannerModel.FilterModel.ByteArrayComparableModel.ComparatorType.html" title="enum in org.apache.hadoop.hbase.rest.model"><span class="typeNameLink">ScannerModel.FilterModel.ByteArrayComparableModel.ComparatorType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.rest.model.<a href="../../../../../../org/apache/hadoop/hbase/rest/model/ScannerModel.FilterModel.FilterType.html" title="enum in org.apache.hadoop.hbase.rest.model"><span class="typeNameLink">ScannerModel.FilterModel.FilterType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.rest.model.<a href="../../../../../../org/apache/hadoop/hbase/rest/model/ScannerModel.FilterModel.ByteArrayComparableModel.ComparatorType.html" title="enum in org.apache.hadoop.hbase.rest.model"><span class="typeNameLink">ScannerModel.FilterModel.ByteArrayComparableModel.ComparatorType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index d9fa142..d02c856 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -137,9 +137,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/AccessController.OpType.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">AccessController.OpType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/Permission.Action.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">Permission.Action</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/AccessControlFilter.Strategy.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">AccessControlFilter.Strategy</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/Permission.Action.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">Permission.Action</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/AccessController.OpType.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">AccessController.OpType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
index 13f949c..67b7e3a 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
@@ -191,8 +191,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/SaslUtil.QualityOfProtection.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">SaslUtil.QualityOfProtection</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/AuthMethod.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">AuthMethod</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/SaslUtil.QualityOfProtection.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">SaslUtil.QualityOfProtection</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/SaslStatus.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">SaslStatus</span></a></li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/CollectionBackedScanner.html b/devapidocs/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
index 8cd85aa..8b24fa4 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/CollectionBackedScanner.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -222,34 +222,28 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/NonReversed
 </td>
 </tr>
 <tr id="i1" class="rowColor">
-<td class="colFirst"><code>long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/CollectionBackedScanner.html#getScannerOrder--">getScannerOrder</a></span>()</code>
-<div class="block">Get the order of this KeyValueScanner.</div>
-</td>
-</tr>
-<tr id="i2" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/CollectionBackedScanner.html#init--">init</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i3" class="rowColor">
+<tr id="i2" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/CollectionBackedScanner.html#next--">next</a></span>()</code>
 <div class="block">Return the next Cell in this scanner, iterating the scanner</div>
 </td>
 </tr>
-<tr id="i4" class="altColor">
+<tr id="i3" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/CollectionBackedScanner.html#peek--">peek</a></span>()</code>
 <div class="block">Look at the next Cell in this scanner, but do not iterate scanner.</div>
 </td>
 </tr>
-<tr id="i5" class="rowColor">
+<tr id="i4" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/CollectionBackedScanner.html#reseek-org.apache.hadoop.hbase.Cell-">reseek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;seekCell)</code>
 <div class="block">Reseek the scanner at or after the specified KeyValue.</div>
 </td>
 </tr>
-<tr id="i6" class="altColor">
+<tr id="i5" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/CollectionBackedScanner.html#seek-org.apache.hadoop.hbase.Cell-">seek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;seekCell)</code>
 <div class="block">Seek the scanner at or after the specified KeyValue.</div>
@@ -277,6 +271,13 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/NonReversed
 <h3>Methods inherited from class&nbsp;java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></h3>
 <code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--" title="class or interface in java.lang">clone</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-" title="class or interface in java.lang">equals</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--" title="class or interface in java.lang">finalize</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--" title="class or interface in java.lang">getClass</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--" title="class or interface in java.lang">hashCode</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--" title="class or interface in java.lang">notify</a>, <a href="https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--" title="class or interface in java.lang">wait</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-" title="class or interface in java.lang">wait</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-" title="class or interface in java.lang">wait</a></code></li>
 </ul>
+<ul class="blockList">
+<li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.KeyValueScanner">
+<!--   -->
+</a>
+<h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></h3>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></code></li>
+</ul>
 </li>
 </ul>
 </li>
@@ -469,32 +470,13 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/NonReversed
 </dl>
 </li>
 </ul>
-<a name="getScannerOrder--">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>getScannerOrder</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html#line.123">getScannerOrder</a>()</pre>
-<div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">KeyValueScanner</a></code></span></div>
-<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and
- MemStoreScanners (other scanners simply return 0). This is required for comparing multiple
- files to find out which one has the latest data. StoreFileScanners are ordered from 0
- (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always
- contains freshest data.</div>
-<dl>
-<dt><span class="seeLabel">See Also:</span></dt>
-<dd><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--"><code>KeyValueScanner.getScannerOrder()</code></a></dd>
-</dl>
-</li>
-</ul>
 <a name="close--">
 <!--   -->
 </a>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>close</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html#line.128">close</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/CollectionBackedScanner.html#line.121">close</a>()</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#close--">KeyValueScanner</a></code></span></div>
 <div class="block">Close the KeyValue scanner.</div>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index 6690860..1adeadd 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2050">HBaseFsck.CheckRegionConsistencyWorkItem</a>
+<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2057">HBaseFsck.CheckRegionConsistencyWorkItem</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 </li>
@@ -211,7 +211,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>key</h4>
-<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2051">key</a></pre>
+<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2058">key</a></pre>
 </li>
 </ul>
 <a name="hbi">
@@ -220,7 +220,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hbi</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2052">hbi</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2059">hbi</a></pre>
 </li>
 </ul>
 </li>
@@ -237,7 +237,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>CheckRegionConsistencyWorkItem</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2054">CheckRegionConsistencyWorkItem</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;key,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2061">CheckRegionConsistencyWorkItem</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;key,
                                <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi)</pre>
 </li>
 </ul>
@@ -255,7 +255,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2060">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2067">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index dbf7168..0a79330 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static enum <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4111">HBaseFsck.ErrorReporter.ERROR_CODE</a>
+<pre>public static enum <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4118">HBaseFsck.ErrorReporter.ERROR_CODE</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang">Enum</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;</pre>
 </li>
 </ul>
@@ -244,6 +244,9 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?i
 <td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#UNKNOWN">UNKNOWN</a></span></code>&nbsp;</td>
 </tr>
 <tr class="altColor">
+<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#UNSUPPORTED_OPTION">UNSUPPORTED_OPTION</a></span></code>&nbsp;</td>
+</tr>
+<tr class="rowColor">
 <td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#WRONG_USAGE">WRONG_USAGE</a></span></code>&nbsp;</td>
 </tr>
 </table>
@@ -309,7 +312,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>UNKNOWN</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4112">UNKNOWN</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">UNKNOWN</a></pre>
 </li>
 </ul>
 <a name="NO_META_REGION">
@@ -318,7 +321,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_META_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4112">NO_META_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">NO_META_REGION</a></pre>
 </li>
 </ul>
 <a name="NULL_META_REGION">
@@ -327,7 +330,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NULL_META_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4112">NULL_META_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">NULL_META_REGION</a></pre>
 </li>
 </ul>
 <a name="NO_VERSION_FILE">
@@ -336,7 +339,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_VERSION_FILE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4112">NO_VERSION_FILE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">NO_VERSION_FILE</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_META_HDFS">
@@ -345,7 +348,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_META_HDFS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4112">NOT_IN_META_HDFS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">NOT_IN_META_HDFS</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_META">
@@ -354,7 +357,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_META</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4112">NOT_IN_META</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">NOT_IN_META</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_META_OR_DEPLOYED">
@@ -363,7 +366,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_META_OR_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4113">NOT_IN_META_OR_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">NOT_IN_META_OR_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_HDFS_OR_DEPLOYED">
@@ -372,7 +375,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_HDFS_OR_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4113">NOT_IN_HDFS_OR_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">NOT_IN_HDFS_OR_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_HDFS">
@@ -381,7 +384,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_HDFS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4113">NOT_IN_HDFS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">NOT_IN_HDFS</a></pre>
 </li>
 </ul>
 <a name="SERVER_DOES_NOT_MATCH_META">
@@ -390,7 +393,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>SERVER_DOES_NOT_MATCH_META</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4113">SERVER_DOES_NOT_MATCH_META</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">SERVER_DOES_NOT_MATCH_META</a></pre>
 </li>
 </ul>
 <a name="NOT_DEPLOYED">
@@ -399,7 +402,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4114">NOT_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4121">NOT_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="MULTI_DEPLOYED">
@@ -408,7 +411,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>MULTI_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4115">MULTI_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4122">MULTI_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="SHOULD_NOT_BE_DEPLOYED">
@@ -417,7 +420,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>SHOULD_NOT_BE_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4115">SHOULD_NOT_BE_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4122">SHOULD_NOT_BE_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="MULTI_META_REGION">
@@ -426,7 +429,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>MULTI_META_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4115">MULTI_META_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4122">MULTI_META_REGION</a></pre>
 </li>
 </ul>
 <a name="RS_CONNECT_FAILURE">
@@ -435,7 +438,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>RS_CONNECT_FAILURE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4115">RS_CONNECT_FAILURE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4122">RS_CONNECT_FAILURE</a></pre>
 </li>
 </ul>
 <a name="FIRST_REGION_STARTKEY_NOT_EMPTY">
@@ -444,7 +447,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>FIRST_REGION_STARTKEY_NOT_EMPTY</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4116">FIRST_REGION_STARTKEY_NOT_EMPTY</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4123">FIRST_REGION_STARTKEY_NOT_EMPTY</a></pre>
 </li>
 </ul>
 <a name="LAST_REGION_ENDKEY_NOT_EMPTY">
@@ -453,7 +456,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LAST_REGION_ENDKEY_NOT_EMPTY</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4116">LAST_REGION_ENDKEY_NOT_EMPTY</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4123">LAST_REGION_ENDKEY_NOT_EMPTY</a></pre>
 </li>
 </ul>
 <a name="DUPE_STARTKEYS">
@@ -462,7 +465,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>DUPE_STARTKEYS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4116">DUPE_STARTKEYS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4123">DUPE_STARTKEYS</a></pre>
 </li>
 </ul>
 <a name="HOLE_IN_REGION_CHAIN">
@@ -471,7 +474,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>HOLE_IN_REGION_CHAIN</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4117">HOLE_IN_REGION_CHAIN</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4124">HOLE_IN_REGION_CHAIN</a></pre>
 </li>
 </ul>
 <a name="OVERLAP_IN_REGION_CHAIN">
@@ -480,7 +483,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>OVERLAP_IN_REGION_CHAIN</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4117">OVERLAP_IN_REGION_CHAIN</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4124">OVERLAP_IN_REGION_CHAIN</a></pre>
 </li>
 </ul>
 <a name="REGION_CYCLE">
@@ -489,7 +492,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>REGION_CYCLE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4117">REGION_CYCLE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4124">REGION_CYCLE</a></pre>
 </li>
 </ul>
 <a name="DEGENERATE_REGION">
@@ -498,7 +501,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>DEGENERATE_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4117">DEGENERATE_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4124">DEGENERATE_REGION</a></pre>
 </li>
 </ul>
 <a name="ORPHAN_HDFS_REGION">
@@ -507,7 +510,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>ORPHAN_HDFS_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4118">ORPHAN_HDFS_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4125">ORPHAN_HDFS_REGION</a></pre>
 </li>
 </ul>
 <a name="LINGERING_SPLIT_PARENT">
@@ -516,7 +519,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LINGERING_SPLIT_PARENT</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4118">LINGERING_SPLIT_PARENT</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4125">LINGERING_SPLIT_PARENT</a></pre>
 </li>
 </ul>
 <a name="NO_TABLEINFO_FILE">
@@ -525,7 +528,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_TABLEINFO_FILE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4118">NO_TABLEINFO_FILE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4125">NO_TABLEINFO_FILE</a></pre>
 </li>
 </ul>
 <a name="LINGERING_REFERENCE_HFILE">
@@ -534,7 +537,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LINGERING_REFERENCE_HFILE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4118">LINGERING_REFERENCE_HFILE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4125">LINGERING_REFERENCE_HFILE</a></pre>
 </li>
 </ul>
 <a name="LINGERING_HFILELINK">
@@ -543,7 +546,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LINGERING_HFILELINK</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">LINGERING_HFILELINK</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4126">LINGERING_HFILELINK</a></pre>
 </li>
 </ul>
 <a name="WRONG_USAGE">
@@ -552,7 +555,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>WRONG_USAGE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">WRONG_USAGE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4126">WRONG_USAGE</a></pre>
 </li>
 </ul>
 <a name="EMPTY_META_CELL">
@@ -561,7 +564,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>EMPTY_META_CELL</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">EMPTY_META_CELL</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4126">EMPTY_META_CELL</a></pre>
 </li>
 </ul>
 <a name="EXPIRED_TABLE_LOCK">
@@ -570,7 +573,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>EXPIRED_TABLE_LOCK</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">EXPIRED_TABLE_LOCK</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4126">EXPIRED_TABLE_LOCK</a></pre>
 </li>
 </ul>
 <a name="BOUNDARIES_ERROR">
@@ -579,7 +582,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>BOUNDARIES_ERROR</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4119">BOUNDARIES_ERROR</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4126">BOUNDARIES_ERROR</a></pre>
 </li>
 </ul>
 <a name="ORPHAN_TABLE_STATE">
@@ -588,7 +591,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>ORPHAN_TABLE_STATE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">ORPHAN_TABLE_STATE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4127">ORPHAN_TABLE_STATE</a></pre>
 </li>
 </ul>
 <a name="NO_TABLE_STATE">
@@ -597,7 +600,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_TABLE_STATE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">NO_TABLE_STATE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4127">NO_TABLE_STATE</a></pre>
 </li>
 </ul>
 <a name="UNDELETED_REPLICATION_QUEUE">
@@ -606,16 +609,25 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>UNDELETED_REPLICATION_QUEUE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">UNDELETED_REPLICATION_QUEUE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4127">UNDELETED_REPLICATION_QUEUE</a></pre>
 </li>
 </ul>
 <a name="DUPE_ENDKEYS">
 <!--   -->
 </a>
-<ul class="blockListLast">
+<ul class="blockList">
 <li class="blockList">
 <h4>DUPE_ENDKEYS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4120">DUPE_ENDKEYS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4127">DUPE_ENDKEYS</a></pre>
+</li>
+</ul>
+<a name="UNSUPPORTED_OPTION">
+<!--   -->
+</a>
+<ul class="blockListLast">
+<li class="blockList">
+<h4>UNSUPPORTED_OPTION</h4>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4128">UNSUPPORTED_OPTION</a></pre>
 </li>
 </ul>
 </li>
@@ -632,7 +644,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>values</h4>
-<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3835">values</a>()</pre>
+<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3842">values</a>()</pre>
 <div class="block">Returns an array containing the constants of this enum type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -652,7 +664,7 @@ for (HBaseFsck.ErrorReporter.ERROR_CODE c : HBaseFsck.ErrorReporter.ERROR_CODE.v
 <ul class="blockListLast">
 <li class="blockList">
 <h4>valueOf</h4>
-<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3835">valueOf</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;name)</pre>
+<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3842">valueOf</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;name)</pre>
 <div class="block">Returns the enum constant of this type with the specified name.
 The string must match <i>exactly</i> an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index 003c88e..d3d2c95 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static interface <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4110">HBaseFsck.ErrorReporter</a></pre>
+<pre>public static interface <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4117">HBaseFsck.ErrorReporter</a></pre>
 </li>
 </ul>
 </div>
@@ -234,7 +234,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>clear</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4122">clear</a>()</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4130">clear</a>()</pre>
 </li>
 </ul>
 <a name="report-java.lang.String-">
@@ -243,7 +243,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>report</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4123">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4131">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
 <a name="reportError-java.lang.String-">
@@ -252,7 +252,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4124">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4132">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
 <a name="reportError-org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE-java.lang.String-">
@@ -261,7 +261,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4125">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4133">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
@@ -271,7 +271,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4126">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4134">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 </li>
@@ -282,7 +282,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4127">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4135">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info)</pre>
@@ -294,7 +294,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4128">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4136">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info1,
@@ -307,7 +307,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>summarize</h4>
-<pre>int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4135">summarize</a>()</pre>
+<pre>int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4143">summarize</a>()</pre>
 </li>
 </ul>
 <a name="detail-java.lang.String-">
@@ -316,7 +316,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>detail</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4136">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;details)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4144">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;details)</pre>
 </li>
 </ul>
 <a name="getErrorList--">
@@ -325,7 +325,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>getErrorList</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4137">getErrorList</a>()</pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4145">getErrorList</a>()</pre>
 </li>
 </ul>
 <a name="progress--">
@@ -334,7 +334,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>progress</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4138">progress</a>()</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4146">progress</a>()</pre>
 </li>
 </ul>
 <a name="print-java.lang.String-">
@@ -343,7 +343,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>print</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4139">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4147">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
 <a name="resetErrors--">
@@ -352,7 +352,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>resetErrors</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4140">resetErrors</a>()</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4148">resetErrors</a>()</pre>
 </li>
 </ul>
 <a name="tableHasErrors-org.apache.hadoop.hbase.util.HBaseFsck.TableInfo-">
@@ -361,7 +361,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockListLast">
 <li class="blockList">
 <h4>tableHasErrors</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4141">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4149">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 </li>
 </ul>
 </li>


[25/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
index 22e7059..01b8a09 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
@@ -362,235 +362,231 @@
 <span class="sourceLineNo">354</span>  }<a name="line.354"></a>
 <span class="sourceLineNo">355</span><a name="line.355"></a>
 <span class="sourceLineNo">356</span>  @Override<a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /*<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Scanners are ordered from 0 (oldest) to newest in increasing order.<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   */<a name="line.359"></a>
-<span class="sourceLineNo">360</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    MutableSegment activeTmp = active;<a name="line.361"></a>
-<span class="sourceLineNo">362</span>    List&lt;? extends Segment&gt; pipelineList = pipeline.getSegments();<a name="line.362"></a>
-<span class="sourceLineNo">363</span>    List&lt;? extends Segment&gt; snapshotList = snapshot.getAllSegments();<a name="line.363"></a>
-<span class="sourceLineNo">364</span>    long order = 1L + pipelineList.size() + snapshotList.size();<a name="line.364"></a>
-<span class="sourceLineNo">365</span>    // The list of elements in pipeline + the active element + the snapshot segment<a name="line.365"></a>
-<span class="sourceLineNo">366</span>    // The order is the Segment ordinal<a name="line.366"></a>
-<span class="sourceLineNo">367</span>    List&lt;KeyValueScanner&gt; list = createList((int) order);<a name="line.367"></a>
-<span class="sourceLineNo">368</span>    order = addToScanners(activeTmp, readPt, order, list);<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    order = addToScanners(pipelineList, readPt, order, list);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    addToScanners(snapshotList, readPt, order, list);<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    return list;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>  }<a name="line.372"></a>
-<span class="sourceLineNo">373</span><a name="line.373"></a>
-<span class="sourceLineNo">374</span>   @VisibleForTesting<a name="line.374"></a>
-<span class="sourceLineNo">375</span>   protected List&lt;KeyValueScanner&gt; createList(int capacity) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>     return new ArrayList&lt;&gt;(capacity);<a name="line.376"></a>
-<span class="sourceLineNo">377</span>   }<a name="line.377"></a>
-<span class="sourceLineNo">378</span><a name="line.378"></a>
-<span class="sourceLineNo">379</span>  /**<a name="line.379"></a>
-<span class="sourceLineNo">380</span>   * Check whether anything need to be done based on the current active set size.<a name="line.380"></a>
-<span class="sourceLineNo">381</span>   * The method is invoked upon every addition to the active set.<a name="line.381"></a>
-<span class="sourceLineNo">382</span>   * For CompactingMemStore, flush the active set to the read-only memory if it's<a name="line.382"></a>
-<span class="sourceLineNo">383</span>   * size is above threshold<a name="line.383"></a>
-<span class="sourceLineNo">384</span>   */<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  @Override<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  protected void checkActiveSize() {<a name="line.386"></a>
-<span class="sourceLineNo">387</span>    if (shouldFlushInMemory()) {<a name="line.387"></a>
-<span class="sourceLineNo">388</span>      /* The thread is dispatched to flush-in-memory. This cannot be done<a name="line.388"></a>
-<span class="sourceLineNo">389</span>      * on the same thread, because for flush-in-memory we require updatesLock<a name="line.389"></a>
-<span class="sourceLineNo">390</span>      * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      * in the shared mode. */<a name="line.391"></a>
-<span class="sourceLineNo">392</span>      InMemoryFlushRunnable runnable = new InMemoryFlushRunnable();<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      if (LOG.isTraceEnabled()) {<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        LOG.trace(<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          "Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());<a name="line.395"></a>
-<span class="sourceLineNo">396</span>      }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      getPool().execute(runnable);<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span>  }<a name="line.399"></a>
-<span class="sourceLineNo">400</span><a name="line.400"></a>
-<span class="sourceLineNo">401</span>  // internally used method, externally visible only for tests<a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  // otherwise there is a deadlock<a name="line.403"></a>
-<span class="sourceLineNo">404</span>  @VisibleForTesting<a name="line.404"></a>
-<span class="sourceLineNo">405</span>  void flushInMemory() throws IOException {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>    // setting the inMemoryFlushInProgress flag again for the case this method is invoked<a name="line.406"></a>
-<span class="sourceLineNo">407</span>    // directly (only in tests) in the common path setting from true to true is idempotent<a name="line.407"></a>
-<span class="sourceLineNo">408</span>    inMemoryFlushInProgress.set(true);<a name="line.408"></a>
-<span class="sourceLineNo">409</span>    try {<a name="line.409"></a>
-<span class="sourceLineNo">410</span>      // Phase I: Update the pipeline<a name="line.410"></a>
-<span class="sourceLineNo">411</span>      getRegionServices().blockUpdates();<a name="line.411"></a>
-<span class="sourceLineNo">412</span>      try {<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");<a name="line.413"></a>
-<span class="sourceLineNo">414</span>        pushActiveToPipeline(this.active);<a name="line.414"></a>
-<span class="sourceLineNo">415</span>      } finally {<a name="line.415"></a>
-<span class="sourceLineNo">416</span>        getRegionServices().unblockUpdates();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>      }<a name="line.417"></a>
-<span class="sourceLineNo">418</span><a name="line.418"></a>
-<span class="sourceLineNo">419</span>      // Used by tests<a name="line.419"></a>
-<span class="sourceLineNo">420</span>      if (!allowCompaction.get()) {<a name="line.420"></a>
-<span class="sourceLineNo">421</span>        return;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      // Phase II: Compact the pipeline<a name="line.423"></a>
-<span class="sourceLineNo">424</span>      try {<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        // Speculative compaction execution, may be interrupted if flush is forced while<a name="line.425"></a>
-<span class="sourceLineNo">426</span>        // compaction is in progress<a name="line.426"></a>
-<span class="sourceLineNo">427</span>        compactor.start();<a name="line.427"></a>
-<span class="sourceLineNo">428</span>      } catch (IOException e) {<a name="line.428"></a>
-<span class="sourceLineNo">429</span>        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",<a name="line.429"></a>
-<span class="sourceLineNo">430</span>            getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>      }<a name="line.431"></a>
-<span class="sourceLineNo">432</span>    } finally {<a name="line.432"></a>
-<span class="sourceLineNo">433</span>      inMemoryFlushInProgress.set(false);<a name="line.433"></a>
-<span class="sourceLineNo">434</span>      LOG.trace("IN-MEMORY FLUSH: end");<a name="line.434"></a>
-<span class="sourceLineNo">435</span>    }<a name="line.435"></a>
-<span class="sourceLineNo">436</span>  }<a name="line.436"></a>
-<span class="sourceLineNo">437</span><a name="line.437"></a>
-<span class="sourceLineNo">438</span>  private Segment getLastSegment() {<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    Segment localActive = getActive();<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    Segment tail = pipeline.getTail();<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    return tail == null ? localActive : tail;<a name="line.441"></a>
+<span class="sourceLineNo">357</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    MutableSegment activeTmp = active;<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    List&lt;? extends Segment&gt; pipelineList = pipeline.getSegments();<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    List&lt;? extends Segment&gt; snapshotList = snapshot.getAllSegments();<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    long numberOfSegments = 1L + pipelineList.size() + snapshotList.size();<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    // The list of elements in pipeline + the active element + the snapshot segment<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    List&lt;KeyValueScanner&gt; list = createList((int) numberOfSegments);<a name="line.363"></a>
+<span class="sourceLineNo">364</span>    addToScanners(activeTmp, readPt, list);<a name="line.364"></a>
+<span class="sourceLineNo">365</span>    addToScanners(pipelineList, readPt, list);<a name="line.365"></a>
+<span class="sourceLineNo">366</span>    addToScanners(snapshotList, readPt, list);<a name="line.366"></a>
+<span class="sourceLineNo">367</span>    return list;<a name="line.367"></a>
+<span class="sourceLineNo">368</span>  }<a name="line.368"></a>
+<span class="sourceLineNo">369</span><a name="line.369"></a>
+<span class="sourceLineNo">370</span>   @VisibleForTesting<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   protected List&lt;KeyValueScanner&gt; createList(int capacity) {<a name="line.371"></a>
+<span class="sourceLineNo">372</span>     return new ArrayList&lt;&gt;(capacity);<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   }<a name="line.373"></a>
+<span class="sourceLineNo">374</span><a name="line.374"></a>
+<span class="sourceLineNo">375</span>  /**<a name="line.375"></a>
+<span class="sourceLineNo">376</span>   * Check whether anything need to be done based on the current active set size.<a name="line.376"></a>
+<span class="sourceLineNo">377</span>   * The method is invoked upon every addition to the active set.<a name="line.377"></a>
+<span class="sourceLineNo">378</span>   * For CompactingMemStore, flush the active set to the read-only memory if it's<a name="line.378"></a>
+<span class="sourceLineNo">379</span>   * size is above threshold<a name="line.379"></a>
+<span class="sourceLineNo">380</span>   */<a name="line.380"></a>
+<span class="sourceLineNo">381</span>  @Override<a name="line.381"></a>
+<span class="sourceLineNo">382</span>  protected void checkActiveSize() {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    if (shouldFlushInMemory()) {<a name="line.383"></a>
+<span class="sourceLineNo">384</span>      /* The thread is dispatched to flush-in-memory. This cannot be done<a name="line.384"></a>
+<span class="sourceLineNo">385</span>      * on the same thread, because for flush-in-memory we require updatesLock<a name="line.385"></a>
+<span class="sourceLineNo">386</span>      * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      * in the shared mode. */<a name="line.387"></a>
+<span class="sourceLineNo">388</span>      InMemoryFlushRunnable runnable = new InMemoryFlushRunnable();<a name="line.388"></a>
+<span class="sourceLineNo">389</span>      if (LOG.isTraceEnabled()) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>        LOG.trace(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>          "Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());<a name="line.391"></a>
+<span class="sourceLineNo">392</span>      }<a name="line.392"></a>
+<span class="sourceLineNo">393</span>      getPool().execute(runnable);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
+<span class="sourceLineNo">395</span>  }<a name="line.395"></a>
+<span class="sourceLineNo">396</span><a name="line.396"></a>
+<span class="sourceLineNo">397</span>  // internally used method, externally visible only for tests<a name="line.397"></a>
+<span class="sourceLineNo">398</span>  // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,<a name="line.398"></a>
+<span class="sourceLineNo">399</span>  // otherwise there is a deadlock<a name="line.399"></a>
+<span class="sourceLineNo">400</span>  @VisibleForTesting<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  void flushInMemory() throws IOException {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    // setting the inMemoryFlushInProgress flag again for the case this method is invoked<a name="line.402"></a>
+<span class="sourceLineNo">403</span>    // directly (only in tests) in the common path setting from true to true is idempotent<a name="line.403"></a>
+<span class="sourceLineNo">404</span>    inMemoryFlushInProgress.set(true);<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    try {<a name="line.405"></a>
+<span class="sourceLineNo">406</span>      // Phase I: Update the pipeline<a name="line.406"></a>
+<span class="sourceLineNo">407</span>      getRegionServices().blockUpdates();<a name="line.407"></a>
+<span class="sourceLineNo">408</span>      try {<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");<a name="line.409"></a>
+<span class="sourceLineNo">410</span>        pushActiveToPipeline(this.active);<a name="line.410"></a>
+<span class="sourceLineNo">411</span>      } finally {<a name="line.411"></a>
+<span class="sourceLineNo">412</span>        getRegionServices().unblockUpdates();<a name="line.412"></a>
+<span class="sourceLineNo">413</span>      }<a name="line.413"></a>
+<span class="sourceLineNo">414</span><a name="line.414"></a>
+<span class="sourceLineNo">415</span>      // Used by tests<a name="line.415"></a>
+<span class="sourceLineNo">416</span>      if (!allowCompaction.get()) {<a name="line.416"></a>
+<span class="sourceLineNo">417</span>        return;<a name="line.417"></a>
+<span class="sourceLineNo">418</span>      }<a name="line.418"></a>
+<span class="sourceLineNo">419</span>      // Phase II: Compact the pipeline<a name="line.419"></a>
+<span class="sourceLineNo">420</span>      try {<a name="line.420"></a>
+<span class="sourceLineNo">421</span>        // Speculative compaction execution, may be interrupted if flush is forced while<a name="line.421"></a>
+<span class="sourceLineNo">422</span>        // compaction is in progress<a name="line.422"></a>
+<span class="sourceLineNo">423</span>        compactor.start();<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      } catch (IOException e) {<a name="line.424"></a>
+<span class="sourceLineNo">425</span>        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",<a name="line.425"></a>
+<span class="sourceLineNo">426</span>            getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e);<a name="line.426"></a>
+<span class="sourceLineNo">427</span>      }<a name="line.427"></a>
+<span class="sourceLineNo">428</span>    } finally {<a name="line.428"></a>
+<span class="sourceLineNo">429</span>      inMemoryFlushInProgress.set(false);<a name="line.429"></a>
+<span class="sourceLineNo">430</span>      LOG.trace("IN-MEMORY FLUSH: end");<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    }<a name="line.431"></a>
+<span class="sourceLineNo">432</span>  }<a name="line.432"></a>
+<span class="sourceLineNo">433</span><a name="line.433"></a>
+<span class="sourceLineNo">434</span>  private Segment getLastSegment() {<a name="line.434"></a>
+<span class="sourceLineNo">435</span>    Segment localActive = getActive();<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    Segment tail = pipeline.getTail();<a name="line.436"></a>
+<span class="sourceLineNo">437</span>    return tail == null ? localActive : tail;<a name="line.437"></a>
+<span class="sourceLineNo">438</span>  }<a name="line.438"></a>
+<span class="sourceLineNo">439</span><a name="line.439"></a>
+<span class="sourceLineNo">440</span>  private byte[] getFamilyNameInBytes() {<a name="line.440"></a>
+<span class="sourceLineNo">441</span>    return store.getColumnFamilyDescriptor().getName();<a name="line.441"></a>
 <span class="sourceLineNo">442</span>  }<a name="line.442"></a>
 <span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>  private byte[] getFamilyNameInBytes() {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>    return store.getColumnFamilyDescriptor().getName();<a name="line.445"></a>
+<span class="sourceLineNo">444</span>  private ThreadPoolExecutor getPool() {<a name="line.444"></a>
+<span class="sourceLineNo">445</span>    return getRegionServices().getInMemoryCompactionPool();<a name="line.445"></a>
 <span class="sourceLineNo">446</span>  }<a name="line.446"></a>
 <span class="sourceLineNo">447</span><a name="line.447"></a>
-<span class="sourceLineNo">448</span>  private ThreadPoolExecutor getPool() {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>    return getRegionServices().getInMemoryCompactionPool();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>  }<a name="line.450"></a>
-<span class="sourceLineNo">451</span><a name="line.451"></a>
-<span class="sourceLineNo">452</span>  @VisibleForTesting<a name="line.452"></a>
-<span class="sourceLineNo">453</span>  protected boolean shouldFlushInMemory() {<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    if (this.active.keySize() &gt; inmemoryFlushSize) { // size above flush threshold<a name="line.454"></a>
-<span class="sourceLineNo">455</span>      if (inWalReplay) {  // when replaying edits from WAL there is no need in in-memory flush<a name="line.455"></a>
-<span class="sourceLineNo">456</span>        return false;     // regardless the size<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>      // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      // the insert of the active into the compaction pipeline<a name="line.459"></a>
-<span class="sourceLineNo">460</span>      return (inMemoryFlushInProgress.compareAndSet(false,true));<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    }<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    return false;<a name="line.462"></a>
-<span class="sourceLineNo">463</span>  }<a name="line.463"></a>
-<span class="sourceLineNo">464</span><a name="line.464"></a>
-<span class="sourceLineNo">465</span>  /**<a name="line.465"></a>
-<span class="sourceLineNo">466</span>   * The request to cancel the compaction asynchronous task (caused by in-memory flush)<a name="line.466"></a>
-<span class="sourceLineNo">467</span>   * The compaction may still happen if the request was sent too late<a name="line.467"></a>
-<span class="sourceLineNo">468</span>   * Non-blocking request<a name="line.468"></a>
-<span class="sourceLineNo">469</span>   */<a name="line.469"></a>
-<span class="sourceLineNo">470</span>  private void stopCompaction() {<a name="line.470"></a>
-<span class="sourceLineNo">471</span>    if (inMemoryFlushInProgress.get()) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      compactor.stop();<a name="line.472"></a>
-<span class="sourceLineNo">473</span>    }<a name="line.473"></a>
-<span class="sourceLineNo">474</span>  }<a name="line.474"></a>
-<span class="sourceLineNo">475</span><a name="line.475"></a>
-<span class="sourceLineNo">476</span>  protected void pushActiveToPipeline(MutableSegment active) {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>    if (!active.isEmpty()) {<a name="line.477"></a>
-<span class="sourceLineNo">478</span>      pipeline.pushHead(active);<a name="line.478"></a>
-<span class="sourceLineNo">479</span>      resetActive();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>    }<a name="line.480"></a>
-<span class="sourceLineNo">481</span>  }<a name="line.481"></a>
-<span class="sourceLineNo">482</span><a name="line.482"></a>
-<span class="sourceLineNo">483</span>  private void pushTailToSnapshot() {<a name="line.483"></a>
-<span class="sourceLineNo">484</span>    VersionedSegmentsList segments = pipeline.getVersionedTail();<a name="line.484"></a>
-<span class="sourceLineNo">485</span>    pushToSnapshot(segments.getStoreSegments());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.486"></a>
-<span class="sourceLineNo">487</span>    pipeline.swap(segments,null,false, false);<a name="line.487"></a>
-<span class="sourceLineNo">488</span>  }<a name="line.488"></a>
-<span class="sourceLineNo">489</span><a name="line.489"></a>
-<span class="sourceLineNo">490</span>  private void pushPipelineToSnapshot() {<a name="line.490"></a>
-<span class="sourceLineNo">491</span>    int iterationsCnt = 0;<a name="line.491"></a>
-<span class="sourceLineNo">492</span>    boolean done = false;<a name="line.492"></a>
-<span class="sourceLineNo">493</span>    while (!done) {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      iterationsCnt++;<a name="line.494"></a>
-<span class="sourceLineNo">495</span>      VersionedSegmentsList segments = pipeline.getVersionedList();<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      pushToSnapshot(segments.getStoreSegments());<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      // swap can return false in case the pipeline was updated by ongoing compaction<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      // and the version increase, the chance of it happenning is very low<a name="line.498"></a>
-<span class="sourceLineNo">499</span>      // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      done = pipeline.swap(segments, null, false, false);<a name="line.500"></a>
-<span class="sourceLineNo">501</span>      if (iterationsCnt&gt;2) {<a name="line.501"></a>
-<span class="sourceLineNo">502</span>        // practically it is impossible that this loop iterates more than two times<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        // (because the compaction is stopped and none restarts it while in snapshot request),<a name="line.503"></a>
-<span class="sourceLineNo">504</span>        // however stopping here for the case of the infinite loop causing by any error<a name="line.504"></a>
-<span class="sourceLineNo">505</span>        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +<a name="line.505"></a>
-<span class="sourceLineNo">506</span>            " while flushing to disk.");<a name="line.506"></a>
-<span class="sourceLineNo">507</span>        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());<a name="line.507"></a>
-<span class="sourceLineNo">508</span>        break;<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      }<a name="line.509"></a>
-<span class="sourceLineNo">510</span>    }<a name="line.510"></a>
-<span class="sourceLineNo">511</span>  }<a name="line.511"></a>
-<span class="sourceLineNo">512</span><a name="line.512"></a>
-<span class="sourceLineNo">513</span>  private void pushToSnapshot(List&lt;ImmutableSegment&gt; segments) {<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    if(segments.isEmpty()) return;<a name="line.514"></a>
-<span class="sourceLineNo">515</span>    if(segments.size() == 1 &amp;&amp; !segments.get(0).isEmpty()) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      this.snapshot = segments.get(0);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>      return;<a name="line.517"></a>
-<span class="sourceLineNo">518</span>    } else { // create composite snapshot<a name="line.518"></a>
-<span class="sourceLineNo">519</span>      this.snapshot =<a name="line.519"></a>
-<span class="sourceLineNo">520</span>          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    }<a name="line.521"></a>
+<span class="sourceLineNo">448</span>  @VisibleForTesting<a name="line.448"></a>
+<span class="sourceLineNo">449</span>  protected boolean shouldFlushInMemory() {<a name="line.449"></a>
+<span class="sourceLineNo">450</span>    if (this.active.keySize() &gt; inmemoryFlushSize) { // size above flush threshold<a name="line.450"></a>
+<span class="sourceLineNo">451</span>      if (inWalReplay) {  // when replaying edits from WAL there is no need in in-memory flush<a name="line.451"></a>
+<span class="sourceLineNo">452</span>        return false;     // regardless the size<a name="line.452"></a>
+<span class="sourceLineNo">453</span>      }<a name="line.453"></a>
+<span class="sourceLineNo">454</span>      // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude<a name="line.454"></a>
+<span class="sourceLineNo">455</span>      // the insert of the active into the compaction pipeline<a name="line.455"></a>
+<span class="sourceLineNo">456</span>      return (inMemoryFlushInProgress.compareAndSet(false,true));<a name="line.456"></a>
+<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>    return false;<a name="line.458"></a>
+<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>  /**<a name="line.461"></a>
+<span class="sourceLineNo">462</span>   * The request to cancel the compaction asynchronous task (caused by in-memory flush)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>   * The compaction may still happen if the request was sent too late<a name="line.463"></a>
+<span class="sourceLineNo">464</span>   * Non-blocking request<a name="line.464"></a>
+<span class="sourceLineNo">465</span>   */<a name="line.465"></a>
+<span class="sourceLineNo">466</span>  private void stopCompaction() {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>    if (inMemoryFlushInProgress.get()) {<a name="line.467"></a>
+<span class="sourceLineNo">468</span>      compactor.stop();<a name="line.468"></a>
+<span class="sourceLineNo">469</span>    }<a name="line.469"></a>
+<span class="sourceLineNo">470</span>  }<a name="line.470"></a>
+<span class="sourceLineNo">471</span><a name="line.471"></a>
+<span class="sourceLineNo">472</span>  protected void pushActiveToPipeline(MutableSegment active) {<a name="line.472"></a>
+<span class="sourceLineNo">473</span>    if (!active.isEmpty()) {<a name="line.473"></a>
+<span class="sourceLineNo">474</span>      pipeline.pushHead(active);<a name="line.474"></a>
+<span class="sourceLineNo">475</span>      resetActive();<a name="line.475"></a>
+<span class="sourceLineNo">476</span>    }<a name="line.476"></a>
+<span class="sourceLineNo">477</span>  }<a name="line.477"></a>
+<span class="sourceLineNo">478</span><a name="line.478"></a>
+<span class="sourceLineNo">479</span>  private void pushTailToSnapshot() {<a name="line.479"></a>
+<span class="sourceLineNo">480</span>    VersionedSegmentsList segments = pipeline.getVersionedTail();<a name="line.480"></a>
+<span class="sourceLineNo">481</span>    pushToSnapshot(segments.getStoreSegments());<a name="line.481"></a>
+<span class="sourceLineNo">482</span>    // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.482"></a>
+<span class="sourceLineNo">483</span>    pipeline.swap(segments,null,false, false);<a name="line.483"></a>
+<span class="sourceLineNo">484</span>  }<a name="line.484"></a>
+<span class="sourceLineNo">485</span><a name="line.485"></a>
+<span class="sourceLineNo">486</span>  private void pushPipelineToSnapshot() {<a name="line.486"></a>
+<span class="sourceLineNo">487</span>    int iterationsCnt = 0;<a name="line.487"></a>
+<span class="sourceLineNo">488</span>    boolean done = false;<a name="line.488"></a>
+<span class="sourceLineNo">489</span>    while (!done) {<a name="line.489"></a>
+<span class="sourceLineNo">490</span>      iterationsCnt++;<a name="line.490"></a>
+<span class="sourceLineNo">491</span>      VersionedSegmentsList segments = pipeline.getVersionedList();<a name="line.491"></a>
+<span class="sourceLineNo">492</span>      pushToSnapshot(segments.getStoreSegments());<a name="line.492"></a>
+<span class="sourceLineNo">493</span>      // swap can return false in case the pipeline was updated by ongoing compaction<a name="line.493"></a>
+<span class="sourceLineNo">494</span>      // and the version increase, the chance of it happenning is very low<a name="line.494"></a>
+<span class="sourceLineNo">495</span>      // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      done = pipeline.swap(segments, null, false, false);<a name="line.496"></a>
+<span class="sourceLineNo">497</span>      if (iterationsCnt&gt;2) {<a name="line.497"></a>
+<span class="sourceLineNo">498</span>        // practically it is impossible that this loop iterates more than two times<a name="line.498"></a>
+<span class="sourceLineNo">499</span>        // (because the compaction is stopped and none restarts it while in snapshot request),<a name="line.499"></a>
+<span class="sourceLineNo">500</span>        // however stopping here for the case of the infinite loop causing by any error<a name="line.500"></a>
+<span class="sourceLineNo">501</span>        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +<a name="line.501"></a>
+<span class="sourceLineNo">502</span>            " while flushing to disk.");<a name="line.502"></a>
+<span class="sourceLineNo">503</span>        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());<a name="line.503"></a>
+<span class="sourceLineNo">504</span>        break;<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      }<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    }<a name="line.506"></a>
+<span class="sourceLineNo">507</span>  }<a name="line.507"></a>
+<span class="sourceLineNo">508</span><a name="line.508"></a>
+<span class="sourceLineNo">509</span>  private void pushToSnapshot(List&lt;ImmutableSegment&gt; segments) {<a name="line.509"></a>
+<span class="sourceLineNo">510</span>    if(segments.isEmpty()) return;<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    if(segments.size() == 1 &amp;&amp; !segments.get(0).isEmpty()) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      this.snapshot = segments.get(0);<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      return;<a name="line.513"></a>
+<span class="sourceLineNo">514</span>    } else { // create composite snapshot<a name="line.514"></a>
+<span class="sourceLineNo">515</span>      this.snapshot =<a name="line.515"></a>
+<span class="sourceLineNo">516</span>          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>  }<a name="line.518"></a>
+<span class="sourceLineNo">519</span><a name="line.519"></a>
+<span class="sourceLineNo">520</span>  private RegionServicesForStores getRegionServices() {<a name="line.520"></a>
+<span class="sourceLineNo">521</span>    return regionServices;<a name="line.521"></a>
 <span class="sourceLineNo">522</span>  }<a name="line.522"></a>
 <span class="sourceLineNo">523</span><a name="line.523"></a>
-<span class="sourceLineNo">524</span>  private RegionServicesForStores getRegionServices() {<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    return regionServices;<a name="line.525"></a>
-<span class="sourceLineNo">526</span>  }<a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>  /**<a name="line.528"></a>
-<span class="sourceLineNo">529</span>  * The in-memory-flusher thread performs the flush asynchronously.<a name="line.529"></a>
-<span class="sourceLineNo">530</span>  * There is at most one thread per memstore instance.<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock<a name="line.531"></a>
-<span class="sourceLineNo">532</span>  * and compacts the pipeline.<a name="line.532"></a>
-<span class="sourceLineNo">533</span>  */<a name="line.533"></a>
-<span class="sourceLineNo">534</span>  private class InMemoryFlushRunnable implements Runnable {<a name="line.534"></a>
-<span class="sourceLineNo">535</span><a name="line.535"></a>
-<span class="sourceLineNo">536</span>    @Override<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    public void run() {<a name="line.537"></a>
-<span class="sourceLineNo">538</span>      try {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        flushInMemory();<a name="line.539"></a>
-<span class="sourceLineNo">540</span>      } catch (IOException e) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>        LOG.warn("Unable to run memstore compaction. region "<a name="line.541"></a>
-<span class="sourceLineNo">542</span>            + getRegionServices().getRegionInfo().getRegionNameAsString()<a name="line.542"></a>
-<span class="sourceLineNo">543</span>            + "store: "+ getFamilyName(), e);<a name="line.543"></a>
-<span class="sourceLineNo">544</span>      }<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
-<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
-<span class="sourceLineNo">547</span><a name="line.547"></a>
-<span class="sourceLineNo">548</span>  @VisibleForTesting<a name="line.548"></a>
-<span class="sourceLineNo">549</span>  boolean isMemStoreFlushingInMemory() {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>    return inMemoryFlushInProgress.get();<a name="line.550"></a>
-<span class="sourceLineNo">551</span>  }<a name="line.551"></a>
-<span class="sourceLineNo">552</span><a name="line.552"></a>
-<span class="sourceLineNo">553</span>  /**<a name="line.553"></a>
-<span class="sourceLineNo">554</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.554"></a>
-<span class="sourceLineNo">555</span>   *             first.<a name="line.555"></a>
-<span class="sourceLineNo">556</span>   * @return Next row or null if none found.<a name="line.556"></a>
-<span class="sourceLineNo">557</span>   */<a name="line.557"></a>
-<span class="sourceLineNo">558</span>  Cell getNextRow(final Cell cell) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Cell lowest = null;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    List&lt;Segment&gt; segments = getSegments();<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    for (Segment segment : segments) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      if (lowest == null) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        lowest = getNextRow(cell, segment.getCellSet());<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      } else {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));<a name="line.565"></a>
-<span class="sourceLineNo">566</span>      }<a name="line.566"></a>
-<span class="sourceLineNo">567</span>    }<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    return lowest;<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  }<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>  @VisibleForTesting<a name="line.571"></a>
-<span class="sourceLineNo">572</span>  long getInmemoryFlushSize() {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    return inmemoryFlushSize;<a name="line.573"></a>
-<span class="sourceLineNo">574</span>  }<a name="line.574"></a>
-<span class="sourceLineNo">575</span><a name="line.575"></a>
-<span class="sourceLineNo">576</span>  // debug method<a name="line.576"></a>
-<span class="sourceLineNo">577</span>  public void debug() {<a name="line.577"></a>
-<span class="sourceLineNo">578</span>    String msg = "active size=" + this.active.keySize();<a name="line.578"></a>
-<span class="sourceLineNo">579</span>    msg += " in-memory flush size is "+ inmemoryFlushSize;<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");<a name="line.580"></a>
-<span class="sourceLineNo">581</span>    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");<a name="line.581"></a>
-<span class="sourceLineNo">582</span>    LOG.debug(msg);<a name="line.582"></a>
-<span class="sourceLineNo">583</span>  }<a name="line.583"></a>
-<span class="sourceLineNo">584</span><a name="line.584"></a>
-<span class="sourceLineNo">585</span>}<a name="line.585"></a>
+<span class="sourceLineNo">524</span>  /**<a name="line.524"></a>
+<span class="sourceLineNo">525</span>  * The in-memory-flusher thread performs the flush asynchronously.<a name="line.525"></a>
+<span class="sourceLineNo">526</span>  * There is at most one thread per memstore instance.<a name="line.526"></a>
+<span class="sourceLineNo">527</span>  * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock<a name="line.527"></a>
+<span class="sourceLineNo">528</span>  * and compacts the pipeline.<a name="line.528"></a>
+<span class="sourceLineNo">529</span>  */<a name="line.529"></a>
+<span class="sourceLineNo">530</span>  private class InMemoryFlushRunnable implements Runnable {<a name="line.530"></a>
+<span class="sourceLineNo">531</span><a name="line.531"></a>
+<span class="sourceLineNo">532</span>    @Override<a name="line.532"></a>
+<span class="sourceLineNo">533</span>    public void run() {<a name="line.533"></a>
+<span class="sourceLineNo">534</span>      try {<a name="line.534"></a>
+<span class="sourceLineNo">535</span>        flushInMemory();<a name="line.535"></a>
+<span class="sourceLineNo">536</span>      } catch (IOException e) {<a name="line.536"></a>
+<span class="sourceLineNo">537</span>        LOG.warn("Unable to run memstore compaction. region "<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            + getRegionServices().getRegionInfo().getRegionNameAsString()<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            + "store: "+ getFamilyName(), e);<a name="line.539"></a>
+<span class="sourceLineNo">540</span>      }<a name="line.540"></a>
+<span class="sourceLineNo">541</span>    }<a name="line.541"></a>
+<span class="sourceLineNo">542</span>  }<a name="line.542"></a>
+<span class="sourceLineNo">543</span><a name="line.543"></a>
+<span class="sourceLineNo">544</span>  @VisibleForTesting<a name="line.544"></a>
+<span class="sourceLineNo">545</span>  boolean isMemStoreFlushingInMemory() {<a name="line.545"></a>
+<span class="sourceLineNo">546</span>    return inMemoryFlushInProgress.get();<a name="line.546"></a>
+<span class="sourceLineNo">547</span>  }<a name="line.547"></a>
+<span class="sourceLineNo">548</span><a name="line.548"></a>
+<span class="sourceLineNo">549</span>  /**<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   *             first.<a name="line.551"></a>
+<span class="sourceLineNo">552</span>   * @return Next row or null if none found.<a name="line.552"></a>
+<span class="sourceLineNo">553</span>   */<a name="line.553"></a>
+<span class="sourceLineNo">554</span>  Cell getNextRow(final Cell cell) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>    Cell lowest = null;<a name="line.555"></a>
+<span class="sourceLineNo">556</span>    List&lt;Segment&gt; segments = getSegments();<a name="line.556"></a>
+<span class="sourceLineNo">557</span>    for (Segment segment : segments) {<a name="line.557"></a>
+<span class="sourceLineNo">558</span>      if (lowest == null) {<a name="line.558"></a>
+<span class="sourceLineNo">559</span>        lowest = getNextRow(cell, segment.getCellSet());<a name="line.559"></a>
+<span class="sourceLineNo">560</span>      } else {<a name="line.560"></a>
+<span class="sourceLineNo">561</span>        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));<a name="line.561"></a>
+<span class="sourceLineNo">562</span>      }<a name="line.562"></a>
+<span class="sourceLineNo">563</span>    }<a name="line.563"></a>
+<span class="sourceLineNo">564</span>    return lowest;<a name="line.564"></a>
+<span class="sourceLineNo">565</span>  }<a name="line.565"></a>
+<span class="sourceLineNo">566</span><a name="line.566"></a>
+<span class="sourceLineNo">567</span>  @VisibleForTesting<a name="line.567"></a>
+<span class="sourceLineNo">568</span>  long getInmemoryFlushSize() {<a name="line.568"></a>
+<span class="sourceLineNo">569</span>    return inmemoryFlushSize;<a name="line.569"></a>
+<span class="sourceLineNo">570</span>  }<a name="line.570"></a>
+<span class="sourceLineNo">571</span><a name="line.571"></a>
+<span class="sourceLineNo">572</span>  // debug method<a name="line.572"></a>
+<span class="sourceLineNo">573</span>  public void debug() {<a name="line.573"></a>
+<span class="sourceLineNo">574</span>    String msg = "active size=" + this.active.keySize();<a name="line.574"></a>
+<span class="sourceLineNo">575</span>    msg += " in-memory flush size is "+ inmemoryFlushSize;<a name="line.575"></a>
+<span class="sourceLineNo">576</span>    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");<a name="line.576"></a>
+<span class="sourceLineNo">577</span>    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");<a name="line.577"></a>
+<span class="sourceLineNo">578</span>    LOG.debug(msg);<a name="line.578"></a>
+<span class="sourceLineNo">579</span>  }<a name="line.579"></a>
+<span class="sourceLineNo">580</span><a name="line.580"></a>
+<span class="sourceLineNo">581</span>}<a name="line.581"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
index f8fd68d..90d50ec 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
@@ -128,174 +128,166 @@
 <span class="sourceLineNo">120</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.120"></a>
 <span class="sourceLineNo">121</span>  }<a name="line.121"></a>
 <span class="sourceLineNo">122</span><a name="line.122"></a>
-<span class="sourceLineNo">123</span>  /**<a name="line.123"></a>
-<span class="sourceLineNo">124</span>   * Creates the scanner for the given read point, and a specific order in a list<a name="line.124"></a>
-<span class="sourceLineNo">125</span>   * @return a scanner for the given read point<a name="line.125"></a>
-<span class="sourceLineNo">126</span>   */<a name="line.126"></a>
-<span class="sourceLineNo">127</span>  @Override<a name="line.127"></a>
-<span class="sourceLineNo">128</span>  public KeyValueScanner getScanner(long readPoint, long order) {<a name="line.128"></a>
-<span class="sourceLineNo">129</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.129"></a>
-<span class="sourceLineNo">130</span>  }<a name="line.130"></a>
-<span class="sourceLineNo">131</span><a name="line.131"></a>
-<span class="sourceLineNo">132</span>  @Override<a name="line.132"></a>
-<span class="sourceLineNo">133</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPoint, long order) {<a name="line.133"></a>
-<span class="sourceLineNo">134</span>    List&lt;KeyValueScanner&gt; list = new ArrayList&lt;&gt;(segments.size());<a name="line.134"></a>
-<span class="sourceLineNo">135</span>    AbstractMemStore.addToScanners(segments, readPoint, order, list);<a name="line.135"></a>
-<span class="sourceLineNo">136</span>    return list;<a name="line.136"></a>
+<span class="sourceLineNo">123</span><a name="line.123"></a>
+<span class="sourceLineNo">124</span>  @Override<a name="line.124"></a>
+<span class="sourceLineNo">125</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPoint) {<a name="line.125"></a>
+<span class="sourceLineNo">126</span>    List&lt;KeyValueScanner&gt; list = new ArrayList&lt;&gt;(segments.size());<a name="line.126"></a>
+<span class="sourceLineNo">127</span>    AbstractMemStore.addToScanners(segments, readPoint, list);<a name="line.127"></a>
+<span class="sourceLineNo">128</span>    return list;<a name="line.128"></a>
+<span class="sourceLineNo">129</span>  }<a name="line.129"></a>
+<span class="sourceLineNo">130</span><a name="line.130"></a>
+<span class="sourceLineNo">131</span>  @Override<a name="line.131"></a>
+<span class="sourceLineNo">132</span>  public boolean isTagsPresent() {<a name="line.132"></a>
+<span class="sourceLineNo">133</span>    for (ImmutableSegment s : segments) {<a name="line.133"></a>
+<span class="sourceLineNo">134</span>      if (s.isTagsPresent()) return true;<a name="line.134"></a>
+<span class="sourceLineNo">135</span>    }<a name="line.135"></a>
+<span class="sourceLineNo">136</span>    return false;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>  }<a name="line.137"></a>
 <span class="sourceLineNo">138</span><a name="line.138"></a>
 <span class="sourceLineNo">139</span>  @Override<a name="line.139"></a>
-<span class="sourceLineNo">140</span>  public boolean isTagsPresent() {<a name="line.140"></a>
-<span class="sourceLineNo">141</span>    for (ImmutableSegment s : segments) {<a name="line.141"></a>
-<span class="sourceLineNo">142</span>      if (s.isTagsPresent()) return true;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>    }<a name="line.143"></a>
-<span class="sourceLineNo">144</span>    return false;<a name="line.144"></a>
-<span class="sourceLineNo">145</span>  }<a name="line.145"></a>
-<span class="sourceLineNo">146</span><a name="line.146"></a>
-<span class="sourceLineNo">147</span>  @Override<a name="line.147"></a>
-<span class="sourceLineNo">148</span>  public void incScannerCount() {<a name="line.148"></a>
-<span class="sourceLineNo">149</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.149"></a>
-<span class="sourceLineNo">150</span>  }<a name="line.150"></a>
-<span class="sourceLineNo">151</span><a name="line.151"></a>
-<span class="sourceLineNo">152</span>  @Override<a name="line.152"></a>
-<span class="sourceLineNo">153</span>  public void decScannerCount() {<a name="line.153"></a>
-<span class="sourceLineNo">154</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.154"></a>
-<span class="sourceLineNo">155</span>  }<a name="line.155"></a>
-<span class="sourceLineNo">156</span><a name="line.156"></a>
-<span class="sourceLineNo">157</span>  /**<a name="line.157"></a>
-<span class="sourceLineNo">158</span>   * Setting the CellSet of the segment - used only for flat immutable segment for setting<a name="line.158"></a>
-<span class="sourceLineNo">159</span>   * immutable CellSet after its creation in immutable segment constructor<a name="line.159"></a>
-<span class="sourceLineNo">160</span>   * @return this object<a name="line.160"></a>
-<span class="sourceLineNo">161</span>   */<a name="line.161"></a>
-<span class="sourceLineNo">162</span>  @Override<a name="line.162"></a>
-<span class="sourceLineNo">163</span>  protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {<a name="line.163"></a>
-<span class="sourceLineNo">164</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.164"></a>
-<span class="sourceLineNo">165</span>  }<a name="line.165"></a>
-<span class="sourceLineNo">166</span><a name="line.166"></a>
-<span class="sourceLineNo">167</span><a name="line.167"></a>
-<span class="sourceLineNo">168</span>  @Override<a name="line.168"></a>
-<span class="sourceLineNo">169</span>  protected long indexEntrySize() {<a name="line.169"></a>
-<span class="sourceLineNo">170</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.170"></a>
-<span class="sourceLineNo">171</span>  }<a name="line.171"></a>
-<span class="sourceLineNo">172</span><a name="line.172"></a>
-<span class="sourceLineNo">173</span>  @Override protected boolean canBeFlattened() {<a name="line.173"></a>
-<span class="sourceLineNo">174</span>    return false;<a name="line.174"></a>
+<span class="sourceLineNo">140</span>  public void incScannerCount() {<a name="line.140"></a>
+<span class="sourceLineNo">141</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.141"></a>
+<span class="sourceLineNo">142</span>  }<a name="line.142"></a>
+<span class="sourceLineNo">143</span><a name="line.143"></a>
+<span class="sourceLineNo">144</span>  @Override<a name="line.144"></a>
+<span class="sourceLineNo">145</span>  public void decScannerCount() {<a name="line.145"></a>
+<span class="sourceLineNo">146</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.146"></a>
+<span class="sourceLineNo">147</span>  }<a name="line.147"></a>
+<span class="sourceLineNo">148</span><a name="line.148"></a>
+<span class="sourceLineNo">149</span>  /**<a name="line.149"></a>
+<span class="sourceLineNo">150</span>   * Setting the CellSet of the segment - used only for flat immutable segment for setting<a name="line.150"></a>
+<span class="sourceLineNo">151</span>   * immutable CellSet after its creation in immutable segment constructor<a name="line.151"></a>
+<span class="sourceLineNo">152</span>   * @return this object<a name="line.152"></a>
+<span class="sourceLineNo">153</span>   */<a name="line.153"></a>
+<span class="sourceLineNo">154</span>  @Override<a name="line.154"></a>
+<span class="sourceLineNo">155</span>  protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {<a name="line.155"></a>
+<span class="sourceLineNo">156</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.156"></a>
+<span class="sourceLineNo">157</span>  }<a name="line.157"></a>
+<span class="sourceLineNo">158</span><a name="line.158"></a>
+<span class="sourceLineNo">159</span><a name="line.159"></a>
+<span class="sourceLineNo">160</span>  @Override<a name="line.160"></a>
+<span class="sourceLineNo">161</span>  protected long indexEntrySize() {<a name="line.161"></a>
+<span class="sourceLineNo">162</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.162"></a>
+<span class="sourceLineNo">163</span>  }<a name="line.163"></a>
+<span class="sourceLineNo">164</span><a name="line.164"></a>
+<span class="sourceLineNo">165</span>  @Override protected boolean canBeFlattened() {<a name="line.165"></a>
+<span class="sourceLineNo">166</span>    return false;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>  }<a name="line.167"></a>
+<span class="sourceLineNo">168</span><a name="line.168"></a>
+<span class="sourceLineNo">169</span>  /**<a name="line.169"></a>
+<span class="sourceLineNo">170</span>   * @return Sum of all cell sizes.<a name="line.170"></a>
+<span class="sourceLineNo">171</span>   */<a name="line.171"></a>
+<span class="sourceLineNo">172</span>  @Override<a name="line.172"></a>
+<span class="sourceLineNo">173</span>  public long keySize() {<a name="line.173"></a>
+<span class="sourceLineNo">174</span>    return this.keySize;<a name="line.174"></a>
 <span class="sourceLineNo">175</span>  }<a name="line.175"></a>
 <span class="sourceLineNo">176</span><a name="line.176"></a>
 <span class="sourceLineNo">177</span>  /**<a name="line.177"></a>
-<span class="sourceLineNo">178</span>   * @return Sum of all cell sizes.<a name="line.178"></a>
+<span class="sourceLineNo">178</span>   * @return The heap size of this segment.<a name="line.178"></a>
 <span class="sourceLineNo">179</span>   */<a name="line.179"></a>
 <span class="sourceLineNo">180</span>  @Override<a name="line.180"></a>
-<span class="sourceLineNo">181</span>  public long keySize() {<a name="line.181"></a>
-<span class="sourceLineNo">182</span>    return this.keySize;<a name="line.182"></a>
-<span class="sourceLineNo">183</span>  }<a name="line.183"></a>
-<span class="sourceLineNo">184</span><a name="line.184"></a>
-<span class="sourceLineNo">185</span>  /**<a name="line.185"></a>
-<span class="sourceLineNo">186</span>   * @return The heap size of this segment.<a name="line.186"></a>
-<span class="sourceLineNo">187</span>   */<a name="line.187"></a>
-<span class="sourceLineNo">188</span>  @Override<a name="line.188"></a>
-<span class="sourceLineNo">189</span>  public long heapSize() {<a name="line.189"></a>
-<span class="sourceLineNo">190</span>    long result = 0;<a name="line.190"></a>
-<span class="sourceLineNo">191</span>    for (ImmutableSegment s : segments) {<a name="line.191"></a>
-<span class="sourceLineNo">192</span>      result += s.heapSize();<a name="line.192"></a>
-<span class="sourceLineNo">193</span>    }<a name="line.193"></a>
-<span class="sourceLineNo">194</span>    return result;<a name="line.194"></a>
+<span class="sourceLineNo">181</span>  public long heapSize() {<a name="line.181"></a>
+<span class="sourceLineNo">182</span>    long result = 0;<a name="line.182"></a>
+<span class="sourceLineNo">183</span>    for (ImmutableSegment s : segments) {<a name="line.183"></a>
+<span class="sourceLineNo">184</span>      result += s.heapSize();<a name="line.184"></a>
+<span class="sourceLineNo">185</span>    }<a name="line.185"></a>
+<span class="sourceLineNo">186</span>    return result;<a name="line.186"></a>
+<span class="sourceLineNo">187</span>  }<a name="line.187"></a>
+<span class="sourceLineNo">188</span><a name="line.188"></a>
+<span class="sourceLineNo">189</span>  /**<a name="line.189"></a>
+<span class="sourceLineNo">190</span>   * Updates the heap size counter of the segment by the given delta<a name="line.190"></a>
+<span class="sourceLineNo">191</span>   */<a name="line.191"></a>
+<span class="sourceLineNo">192</span>  @Override<a name="line.192"></a>
+<span class="sourceLineNo">193</span>  protected void incSize(long delta, long heapOverhead, long offHeapOverhead) {<a name="line.193"></a>
+<span class="sourceLineNo">194</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.194"></a>
 <span class="sourceLineNo">195</span>  }<a name="line.195"></a>
 <span class="sourceLineNo">196</span><a name="line.196"></a>
-<span class="sourceLineNo">197</span>  /**<a name="line.197"></a>
-<span class="sourceLineNo">198</span>   * Updates the heap size counter of the segment by the given delta<a name="line.198"></a>
-<span class="sourceLineNo">199</span>   */<a name="line.199"></a>
-<span class="sourceLineNo">200</span>  @Override<a name="line.200"></a>
-<span class="sourceLineNo">201</span>  protected void incSize(long delta, long heapOverhead, long offHeapOverhead) {<a name="line.201"></a>
-<span class="sourceLineNo">202</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.202"></a>
-<span class="sourceLineNo">203</span>  }<a name="line.203"></a>
-<span class="sourceLineNo">204</span><a name="line.204"></a>
-<span class="sourceLineNo">205</span>  @Override<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  public long getMinSequenceId() {<a name="line.206"></a>
-<span class="sourceLineNo">207</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  }<a name="line.208"></a>
-<span class="sourceLineNo">209</span><a name="line.209"></a>
-<span class="sourceLineNo">210</span>  @Override<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  public TimeRangeTracker getTimeRangeTracker() {<a name="line.211"></a>
-<span class="sourceLineNo">212</span>    return this.timeRangeTracker;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  }<a name="line.213"></a>
-<span class="sourceLineNo">214</span><a name="line.214"></a>
-<span class="sourceLineNo">215</span>  //*** Methods for SegmentsScanner<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  @Override<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  public Cell last() {<a name="line.217"></a>
-<span class="sourceLineNo">218</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  }<a name="line.219"></a>
-<span class="sourceLineNo">220</span><a name="line.220"></a>
-<span class="sourceLineNo">221</span>  @Override<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  public Iterator&lt;Cell&gt; iterator() {<a name="line.222"></a>
-<span class="sourceLineNo">223</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  }<a name="line.224"></a>
-<span class="sourceLineNo">225</span><a name="line.225"></a>
-<span class="sourceLineNo">226</span>  @Override<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  public SortedSet&lt;Cell&gt; headSet(Cell firstKeyOnRow) {<a name="line.227"></a>
-<span class="sourceLineNo">228</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  }<a name="line.229"></a>
-<span class="sourceLineNo">230</span><a name="line.230"></a>
-<span class="sourceLineNo">231</span>  @Override<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  public int compare(Cell left, Cell right) {<a name="line.232"></a>
-<span class="sourceLineNo">233</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  }<a name="line.234"></a>
-<span class="sourceLineNo">235</span><a name="line.235"></a>
+<span class="sourceLineNo">197</span>  @Override<a name="line.197"></a>
+<span class="sourceLineNo">198</span>  public long getMinSequenceId() {<a name="line.198"></a>
+<span class="sourceLineNo">199</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.199"></a>
+<span class="sourceLineNo">200</span>  }<a name="line.200"></a>
+<span class="sourceLineNo">201</span><a name="line.201"></a>
+<span class="sourceLineNo">202</span>  @Override<a name="line.202"></a>
+<span class="sourceLineNo">203</span>  public TimeRangeTracker getTimeRangeTracker() {<a name="line.203"></a>
+<span class="sourceLineNo">204</span>    return this.timeRangeTracker;<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  }<a name="line.205"></a>
+<span class="sourceLineNo">206</span><a name="line.206"></a>
+<span class="sourceLineNo">207</span>  //*** Methods for SegmentsScanner<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  @Override<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  public Cell last() {<a name="line.209"></a>
+<span class="sourceLineNo">210</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  }<a name="line.211"></a>
+<span class="sourceLineNo">212</span><a name="line.212"></a>
+<span class="sourceLineNo">213</span>  @Override<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  public Iterator&lt;Cell&gt; iterator() {<a name="line.214"></a>
+<span class="sourceLineNo">215</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  }<a name="line.216"></a>
+<span class="sourceLineNo">217</span><a name="line.217"></a>
+<span class="sourceLineNo">218</span>  @Override<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  public SortedSet&lt;Cell&gt; headSet(Cell firstKeyOnRow) {<a name="line.219"></a>
+<span class="sourceLineNo">220</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  }<a name="line.221"></a>
+<span class="sourceLineNo">222</span><a name="line.222"></a>
+<span class="sourceLineNo">223</span>  @Override<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  public int compare(Cell left, Cell right) {<a name="line.224"></a>
+<span class="sourceLineNo">225</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.225"></a>
+<span class="sourceLineNo">226</span>  }<a name="line.226"></a>
+<span class="sourceLineNo">227</span><a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @Override<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public int compareRows(Cell left, Cell right) {<a name="line.229"></a>
+<span class="sourceLineNo">230</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  }<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>  /**<a name="line.233"></a>
+<span class="sourceLineNo">234</span>   * @return a set of all cells in the segment<a name="line.234"></a>
+<span class="sourceLineNo">235</span>   */<a name="line.235"></a>
 <span class="sourceLineNo">236</span>  @Override<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  public int compareRows(Cell left, Cell right) {<a name="line.237"></a>
+<span class="sourceLineNo">237</span>  protected CellSet getCellSet() {<a name="line.237"></a>
 <span class="sourceLineNo">238</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.238"></a>
 <span class="sourceLineNo">239</span>  }<a name="line.239"></a>
 <span class="sourceLineNo">240</span><a name="line.240"></a>
-<span class="sourceLineNo">241</span>  /**<a name="line.241"></a>
-<span class="sourceLineNo">242</span>   * @return a set of all cells in the segment<a name="line.242"></a>
-<span class="sourceLineNo">243</span>   */<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  @Override<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  protected CellSet getCellSet() {<a name="line.245"></a>
-<span class="sourceLineNo">246</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  }<a name="line.247"></a>
-<span class="sourceLineNo">248</span><a name="line.248"></a>
-<span class="sourceLineNo">249</span>  @Override<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing) {<a name="line.250"></a>
-<span class="sourceLineNo">251</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  }<a name="line.252"></a>
-<span class="sourceLineNo">253</span><a name="line.253"></a>
-<span class="sourceLineNo">254</span>  @Override<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      MemStoreSizing memstoreSizing) {<a name="line.256"></a>
-<span class="sourceLineNo">257</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  }<a name="line.258"></a>
-<span class="sourceLineNo">259</span><a name="line.259"></a>
-<span class="sourceLineNo">260</span>  /**<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   * Returns a subset of the segment cell set, which starts with the given cell<a name="line.261"></a>
-<span class="sourceLineNo">262</span>   * @param firstCell a cell in the segment<a name="line.262"></a>
-<span class="sourceLineNo">263</span>   * @return a subset of the segment cell set, which starts with the given cell<a name="line.263"></a>
-<span class="sourceLineNo">264</span>   */<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  @Override<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  protected SortedSet&lt;Cell&gt; tailSet(Cell firstCell) {<a name="line.266"></a>
-<span class="sourceLineNo">267</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  }<a name="line.268"></a>
-<span class="sourceLineNo">269</span><a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // Debug methods<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  /**<a name="line.271"></a>
-<span class="sourceLineNo">272</span>   * Dumps all cells of the segment into the given log<a name="line.272"></a>
-<span class="sourceLineNo">273</span>   */<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  @Override<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  void dump(Logger log) {<a name="line.275"></a>
-<span class="sourceLineNo">276</span>    for (ImmutableSegment s : segments) {<a name="line.276"></a>
-<span class="sourceLineNo">277</span>      s.dump(log);<a name="line.277"></a>
-<span class="sourceLineNo">278</span>    }<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  }<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>  public String toString() {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>    StringBuilder sb =<a name="line.283"></a>
-<span class="sourceLineNo">284</span>        new StringBuilder("This is CompositeImmutableSegment and those are its segments:: ");<a name="line.284"></a>
-<span class="sourceLineNo">285</span>    for (ImmutableSegment s : segments) {<a name="line.285"></a>
-<span class="sourceLineNo">286</span>      sb.append(s.toString());<a name="line.286"></a>
-<span class="sourceLineNo">287</span>    }<a name="line.287"></a>
-<span class="sourceLineNo">288</span>    return sb.toString();<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  }<a name="line.289"></a>
-<span class="sourceLineNo">290</span>}<a name="line.290"></a>
+<span class="sourceLineNo">241</span>  @Override<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing) {<a name="line.242"></a>
+<span class="sourceLineNo">243</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.243"></a>
+<span class="sourceLineNo">244</span>  }<a name="line.244"></a>
+<span class="sourceLineNo">245</span><a name="line.245"></a>
+<span class="sourceLineNo">246</span>  @Override<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      MemStoreSizing memstoreSizing) {<a name="line.248"></a>
+<span class="sourceLineNo">249</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  }<a name="line.250"></a>
+<span class="sourceLineNo">251</span><a name="line.251"></a>
+<span class="sourceLineNo">252</span>  /**<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   * Returns a subset of the segment cell set, which starts with the given cell<a name="line.253"></a>
+<span class="sourceLineNo">254</span>   * @param firstCell a cell in the segment<a name="line.254"></a>
+<span class="sourceLineNo">255</span>   * @return a subset of the segment cell set, which starts with the given cell<a name="line.255"></a>
+<span class="sourceLineNo">256</span>   */<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  @Override<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  protected SortedSet&lt;Cell&gt; tailSet(Cell firstCell) {<a name="line.258"></a>
+<span class="sourceLineNo">259</span>    throw new IllegalStateException("Not supported by CompositeImmutableScanner");<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  }<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Debug methods<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  /**<a name="line.263"></a>
+<span class="sourceLineNo">264</span>   * Dumps all cells of the segment into the given log<a name="line.264"></a>
+<span class="sourceLineNo">265</span>   */<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  @Override<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  void dump(Logger log) {<a name="line.267"></a>
+<span class="sourceLineNo">268</span>    for (ImmutableSegment s : segments) {<a name="line.268"></a>
+<span class="sourceLineNo">269</span>      s.dump(log);<a name="line.269"></a>
+<span class="sourceLineNo">270</span>    }<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  }<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  @Override<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  public String toString() {<a name="line.274"></a>
+<span class="sourceLineNo">275</span>    StringBuilder sb =<a name="line.275"></a>
+<span class="sourceLineNo">276</span>        new StringBuilder("This is CompositeImmutableSegment and those are its segments:: ");<a name="line.276"></a>
+<span class="sourceLineNo">277</span>    for (ImmutableSegment s : segments) {<a name="line.277"></a>
+<span class="sourceLineNo">278</span>      sb.append(s.toString());<a name="line.278"></a>
+<span class="sourceLineNo">279</span>    }<a name="line.279"></a>
+<span class="sourceLineNo">280</span>    return sb.toString();<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  }<a name="line.281"></a>
+<span class="sourceLineNo">282</span>}<a name="line.282"></a>
 
 
 


[14/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed to create lock file

<TRUNCATED>

[43/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/ede30993
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/ede30993
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/ede30993

Branch: refs/heads/asf-site
Commit: ede30993e541bdad212420f0742afb28656a60f7
Parents: d6d46e7
Author: jenkins <bu...@apache.org>
Authored: Thu Apr 19 14:46:11 2018 +0000
Committer: jenkins <bu...@apache.org>
Committed: Thu Apr 19 14:46:11 2018 +0000

----------------------------------------------------------------------
 acid-semantics.html                             |     4 +-
 apache_hbase_reference_guide.pdf                | 30753 +++++++++--------
 .../apache/hadoop/hbase/util/VersionInfo.html   |     2 +-
 .../apache/hadoop/hbase/util/VersionInfo.html   |    91 +-
 book.html                                       |    17 +-
 bulk-loads.html                                 |     4 +-
 checkstyle-aggregate.html                       | 12018 ++++---
 checkstyle.rss                                  |     8 +-
 coc.html                                        |     4 +-
 dependencies.html                               |     4 +-
 dependency-convergence.html                     |     4 +-
 dependency-info.html                            |     4 +-
 dependency-management.html                      |     4 +-
 devapidocs/constant-values.html                 |    65 +-
 devapidocs/index-all.html                       |    49 +-
 .../hadoop/hbase/backup/package-tree.html       |     4 +-
 .../hadoop/hbase/client/package-tree.html       |    22 +-
 .../hadoop/hbase/filter/package-tree.html       |    12 +-
 .../hadoop/hbase/io/hfile/package-tree.html     |     6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |     2 +-
 .../hadoop/hbase/mapreduce/package-tree.html    |     4 +-
 .../SimpleRegionNormalizer.PlanComparator.html  |     6 +-
 .../normalizer/SimpleRegionNormalizer.html      |    40 +-
 .../hadoop/hbase/master/package-tree.html       |     6 +-
 .../hbase/master/procedure/package-tree.html    |     4 +-
 .../hadoop/hbase/monitoring/package-tree.html   |     2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |    16 +-
 .../hadoop/hbase/procedure2/package-tree.html   |     4 +-
 .../hadoop/hbase/quotas/package-tree.html       |     6 +-
 .../hbase/regionserver/AbstractMemStore.html    |    74 +-
 .../regionserver/CSLMImmutableSegment.html      |     2 +-
 .../regionserver/CellArrayImmutableSegment.html |     2 +-
 .../regionserver/CellChunkImmutableSegment.html |     2 +-
 ...ompactingMemStore.InMemoryFlushRunnable.html |     6 +-
 .../hbase/regionserver/CompactingMemStore.html  |    38 +-
 .../regionserver/CompositeImmutableSegment.html |   116 +-
 .../hbase/regionserver/DefaultMemStore.html     |    18 +-
 .../hbase/regionserver/ImmutableSegment.html    |     2 +-
 .../hadoop/hbase/regionserver/KeyValueHeap.html |    51 +-
 .../hbase/regionserver/KeyValueScanner.html     |    18 +-
 .../MemStoreCompactorSegmentsIterator.html      |    12 +-
 .../MemStoreMergerSegmentsIterator.html         |     8 +-
 .../hbase/regionserver/MobStoreScanner.html     |     4 +-
 .../hbase/regionserver/MutableSegment.html      |     2 +-
 .../regionserver/ReversedKeyValueHeap.html      |     4 +-
 .../regionserver/ReversedMobStoreScanner.html   |     2 +-
 .../regionserver/ReversedStoreScanner.html      |     2 +-
 .../hadoop/hbase/regionserver/Segment.html      |   152 +-
 .../hbase/regionserver/SegmentScanner.html      |   187 +-
 .../hbase/regionserver/StoreFileScanner.html    |     8 +-
 .../hadoop/hbase/regionserver/StoreScanner.html |    87 +-
 .../regionserver/class-use/KeyValueScanner.html |    36 +-
 .../hbase/regionserver/class-use/Segment.html   |    19 +-
 .../hadoop/hbase/regionserver/package-tree.html |    18 +-
 .../regionserver/querymatcher/package-tree.html |     2 +-
 .../hbase/regionserver/wal/package-tree.html    |     2 +-
 .../replication/regionserver/package-tree.html  |     2 +-
 .../hadoop/hbase/rest/model/package-tree.html   |     2 +-
 .../hbase/security/access/package-tree.html     |     4 +-
 .../hadoop/hbase/security/package-tree.html     |     2 +-
 .../hbase/util/CollectionBackedScanner.html     |    44 +-
 ...BaseFsck.CheckRegionConsistencyWorkItem.html |    10 +-
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html     |    90 +-
 .../hbase/util/HBaseFsck.ErrorReporter.html     |    30 +-
 .../hbase/util/HBaseFsck.FileLockCallable.html  |    10 +-
 .../hbase/util/HBaseFsck.HBaseFsckTool.html     |     6 +-
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   |    56 +-
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  |    14 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  |    18 +-
 .../hbase/util/HBaseFsck.OnlineEntry.html       |    10 +-
 .../util/HBaseFsck.PrintingErrorReporter.html   |    42 +-
 .../HBaseFsck.RegionBoundariesInformation.html  |    16 +-
 .../util/HBaseFsck.RegionRepairException.html   |     8 +-
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html |    22 +-
 ...aseFsck.TableInfo.IntegrityFixSuggester.html |    20 +-
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html  |    38 +-
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html   |    12 +-
 .../util/HBaseFsck.WorkItemHdfsRegionInfo.html  |    12 +-
 .../util/HBaseFsck.WorkItemOverlapMerge.html    |    10 +-
 .../hbase/util/HBaseFsck.WorkItemRegion.html    |    16 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.html |   618 +-
 .../apache/hadoop/hbase/util/VersionInfo.html   |    40 +-
 .../apache/hadoop/hbase/util/package-tree.html  |     8 +-
 .../apache/hadoop/hbase/wal/package-tree.html   |     2 +-
 .../org/apache/hadoop/hbase/Version.html        |     6 +-
 .../SimpleRegionNormalizer.PlanComparator.html  |   396 +-
 .../normalizer/SimpleRegionNormalizer.html      |   396 +-
 .../hbase/regionserver/AbstractMemStore.html    |   558 +-
 ...ompactingMemStore.InMemoryFlushRunnable.html |   442 +-
 .../CompactingMemStore.IndexType.html           |   442 +-
 .../hbase/regionserver/CompactingMemStore.html  |   442 +-
 .../regionserver/CompositeImmutableSegment.html |   300 +-
 .../hbase/regionserver/DefaultMemStore.html     |   187 +-
 .../KeyValueHeap.KVScannerComparator.html       |    63 +-
 .../hadoop/hbase/regionserver/KeyValueHeap.html |    63 +-
 .../hbase/regionserver/KeyValueScanner.html     |    14 +-
 .../MemStoreCompactorSegmentsIterator.html      |   211 +-
 .../MemStoreMergerSegmentsIterator.html         |   103 +-
 .../hbase/regionserver/MemStoreSnapshot.html    |     2 +-
 .../hadoop/hbase/regionserver/Segment.html      |   574 +-
 .../hbase/regionserver/SegmentScanner.html      |   698 +-
 .../hadoop/hbase/regionserver/StoreScanner.html |   191 +-
 .../hbase/util/CollectionBackedScanner.html     |    19 +-
 ...BaseFsck.CheckRegionConsistencyWorkItem.html |  9775 +++---
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html     |  9775 +++---
 .../hbase/util/HBaseFsck.ErrorReporter.html     |  9775 +++---
 .../hbase/util/HBaseFsck.FileLockCallable.html  |  9775 +++---
 .../hbase/util/HBaseFsck.HBaseFsckTool.html     |  9775 +++---
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   |  9775 +++---
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  |  9775 +++---
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  |  9775 +++---
 .../hbase/util/HBaseFsck.OnlineEntry.html       |  9775 +++---
 .../util/HBaseFsck.PrintingErrorReporter.html   |  9775 +++---
 .../HBaseFsck.RegionBoundariesInformation.html  |  9775 +++---
 .../util/HBaseFsck.RegionRepairException.html   |  9775 +++---
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html |  9775 +++---
 ...aseFsck.TableInfo.IntegrityFixSuggester.html |  9775 +++---
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html  |  9775 +++---
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html   |  9775 +++---
 .../util/HBaseFsck.WorkItemHdfsRegionInfo.html  |  9775 +++---
 .../util/HBaseFsck.WorkItemOverlapMerge.html    |  9775 +++---
 .../hbase/util/HBaseFsck.WorkItemRegion.html    |  9775 +++---
 .../org/apache/hadoop/hbase/util/HBaseFsck.html |  9775 +++---
 .../apache/hadoop/hbase/util/VersionInfo.html   |    91 +-
 export_control.html                             |     4 +-
 index.html                                      |     4 +-
 integration.html                                |     4 +-
 issue-tracking.html                             |     4 +-
 license.html                                    |     4 +-
 mail-lists.html                                 |     4 +-
 metrics.html                                    |     4 +-
 old_news.html                                   |     4 +-
 plugin-management.html                          |     4 +-
 plugins.html                                    |     4 +-
 poweredbyhbase.html                             |     4 +-
 project-info.html                               |     4 +-
 project-reports.html                            |     4 +-
 project-summary.html                            |     4 +-
 pseudo-distributed.html                         |     4 +-
 replication.html                                |     4 +-
 resources.html                                  |     4 +-
 source-repository.html                          |     4 +-
 sponsors.html                                   |     4 +-
 supportingprojects.html                         |     4 +-
 team-list.html                                  |     4 +-
 ...mClientSideScanExcpetion.MyStoreScanner.html |     4 +-
 .../hbase/regionserver/KeyValueScanFixture.html |     9 +-
 .../TestKeyValueHeap.TestScanner.html           |     6 -
 ...cannerHeartbeatMessages.HeartbeatKVHeap.html |     4 +-
 ...artbeatMessages.HeartbeatReversedKVHeap.html |     4 +-
 .../TestStoreScanner.CellGridStoreScanner.html  |     4 +-
 ....CellWithVersionsNoOptimizeStoreScanner.html |     4 +-
 ...oreScanner.CellWithVersionsStoreScanner.html |     4 +-
 .../TestStoreScanner.KeyValueHeapWithCount.html |     4 +-
 154 files changed, 123375 insertions(+), 122651 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/acid-semantics.html
----------------------------------------------------------------------
diff --git a/acid-semantics.html b/acid-semantics.html
index 8f28305..dd6f320 100644
--- a/acid-semantics.html
+++ b/acid-semantics.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Apache HBase (TM) ACID Properties
@@ -601,7 +601,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 


[37/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
index ef42115..6a4d81b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
@@ -478,7 +478,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">AbstractMemStore</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-java.lang.Iterable-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#clearSnapshot-long-">clearSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionser
 ver/AbstractMemStore.html#getActive--">getActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getConfiguration--">getConfiguration</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getLowest-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">getLowest</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getNextRow-org.apache.hadoop.hbase.Cell-java.util.NavigableSet-">getNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshot--">getSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSize--">getSnapshotSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSizing--">getSnapshotSizing</a>, <a
  href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#resetActive--">resetActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#timeOfOldestEdit--">timeOfOldestEdit</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#toString--">toString</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#upsert-java.lang.Iterable-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">upsert</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-java.lang.Iterable-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#clearSnapshot-long-">clearSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Abstra
 ctMemStore.html#getActive--">getActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getConfiguration--">getConfiguration</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getLowest-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">getLowest</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getNextRow-org.apache.hadoop.hbase.Cell-java.util.NavigableSet-">getNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshot--">getSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSize--">getSnapshotSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSizing--">getSnapshotSizing</a>, <a href="../
 ../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#resetActive--">resetActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#timeOfOldestEdit--">timeOfOldestEdit</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#toString--">toString</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#upsert-java.lang.Iterable-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">upsert</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
@@ -976,7 +976,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getScanners</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.360">getScanners</a>(long&nbsp;readPt)
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.357">getScanners</a>(long&nbsp;readPt)
                                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
@@ -993,7 +993,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>createList</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.375">createList</a>(int&nbsp;capacity)</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.371">createList</a>(int&nbsp;capacity)</pre>
 </li>
 </ul>
 <a name="checkActiveSize--">
@@ -1002,7 +1002,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>checkActiveSize</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.386">checkActiveSize</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.382">checkActiveSize</a>()</pre>
 <div class="block">Check whether anything need to be done based on the current active set size.
  The method is invoked upon every addition to the active set.
  For CompactingMemStore, flush the active set to the read-only memory if it's
@@ -1019,7 +1019,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>flushInMemory</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.405">flushInMemory</a>()
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.401">flushInMemory</a>()
             throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -1033,7 +1033,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getLastSegment</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.438">getLastSegment</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.434">getLastSegment</a>()</pre>
 </li>
 </ul>
 <a name="getFamilyNameInBytes--">
@@ -1042,7 +1042,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getFamilyNameInBytes</h4>
-<pre>private&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.444">getFamilyNameInBytes</a>()</pre>
+<pre>private&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.440">getFamilyNameInBytes</a>()</pre>
 </li>
 </ul>
 <a name="getPool--">
@@ -1051,7 +1051,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getPool</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true" title="class or interface in java.util.concurrent">ThreadPoolExecutor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.448">getPool</a>()</pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true" title="class or interface in java.util.concurrent">ThreadPoolExecutor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.444">getPool</a>()</pre>
 </li>
 </ul>
 <a name="shouldFlushInMemory--">
@@ -1060,7 +1060,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>shouldFlushInMemory</h4>
-<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.453">shouldFlushInMemory</a>()</pre>
+<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.449">shouldFlushInMemory</a>()</pre>
 </li>
 </ul>
 <a name="stopCompaction--">
@@ -1069,7 +1069,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>stopCompaction</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.470">stopCompaction</a>()</pre>
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.466">stopCompaction</a>()</pre>
 <div class="block">The request to cancel the compaction asynchronous task (caused by in-memory flush)
  The compaction may still happen if the request was sent too late
  Non-blocking request</div>
@@ -1081,7 +1081,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>pushActiveToPipeline</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.476">pushActiveToPipeline</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/MutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">MutableSegment</a>&nbsp;active)</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.472">pushActiveToPipeline</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/MutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">MutableSegment</a>&nbsp;active)</pre>
 </li>
 </ul>
 <a name="pushTailToSnapshot--">
@@ -1090,7 +1090,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>pushTailToSnapshot</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.483">pushTailToSnapshot</a>()</pre>
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.479">pushTailToSnapshot</a>()</pre>
 </li>
 </ul>
 <a name="pushPipelineToSnapshot--">
@@ -1099,7 +1099,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>pushPipelineToSnapshot</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.490">pushPipelineToSnapshot</a>()</pre>
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.486">pushPipelineToSnapshot</a>()</pre>
 </li>
 </ul>
 <a name="pushToSnapshot-java.util.List-">
@@ -1108,7 +1108,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>pushToSnapshot</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.513">pushToSnapshot</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">ImmutableSegment</a>&gt;&nbsp;segments)</pre>
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.509">pushToSnapshot</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">ImmutableSegment</a>&gt;&nbsp;segments)</pre>
 </li>
 </ul>
 <a name="getRegionServices--">
@@ -1117,7 +1117,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionServices</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/RegionServicesForStores.html" title="class in org.apache.hadoop.hbase.regionserver">RegionServicesForStores</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.524">getRegionServices</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/RegionServicesForStores.html" title="class in org.apache.hadoop.hbase.regionserver">RegionServicesForStores</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.520">getRegionServices</a>()</pre>
 </li>
 </ul>
 <a name="isMemStoreFlushingInMemory--">
@@ -1126,7 +1126,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>isMemStoreFlushingInMemory</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.549">isMemStoreFlushingInMemory</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.545">isMemStoreFlushingInMemory</a>()</pre>
 </li>
 </ul>
 <a name="getNextRow-org.apache.hadoop.hbase.Cell-">
@@ -1135,7 +1135,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getNextRow</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.558">getNextRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.554">getNextRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
 <dl>
 <dt><span class="paramLabel">Parameters:</span></dt>
 <dd><code>cell</code> - Find the row that comes after this one.  If null, we return the
@@ -1151,7 +1151,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getInmemoryFlushSize</h4>
-<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.572">getInmemoryFlushSize</a>()</pre>
+<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.568">getInmemoryFlushSize</a>()</pre>
 </li>
 </ul>
 <a name="debug--">
@@ -1160,7 +1160,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockListLast">
 <li class="blockList">
 <h4>debug</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.577">debug</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.573">debug</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
index e3ec42d..a4b2e8a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -256,34 +256,26 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 </td>
 </tr>
 <tr id="i12" class="altColor">
-<td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanner-long-long-">getScanner</a></span>(long&nbsp;readPoint,
-          long&nbsp;order)</code>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-</td>
-</tr>
-<tr id="i13" class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanners-long-long-">getScanners</a></span>(long&nbsp;readPoint,
-           long&nbsp;order)</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPoint)</code>&nbsp;</td>
 </tr>
-<tr id="i14" class="altColor">
+<tr id="i13" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.html" title="class in org.apache.hadoop.hbase.regionserver">TimeRangeTracker</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getTimeRangeTracker--">getTimeRangeTracker</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i15" class="rowColor">
+<tr id="i14" class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstKeyOnRow)</code>&nbsp;</td>
 </tr>
-<tr id="i16" class="altColor">
+<tr id="i15" class="rowColor">
 <td class="colFirst"><code>long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#heapSize--">heapSize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i17" class="rowColor">
+<tr id="i16" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#incScannerCount--">incScannerCount</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i18" class="altColor">
+<tr id="i17" class="rowColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#incSize-long-long-long-">incSize</a></span>(long&nbsp;delta,
        long&nbsp;heapOverhead,
@@ -291,37 +283,37 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <div class="block">Updates the heap size counter of the segment by the given delta</div>
 </td>
 </tr>
-<tr id="i19" class="rowColor">
+<tr id="i18" class="altColor">
 <td class="colFirst"><code>protected long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#indexEntrySize--">indexEntrySize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i20" class="altColor">
+<tr id="i19" class="rowColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
            boolean&nbsp;mslabUsed,
            <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</code>&nbsp;</td>
 </tr>
-<tr id="i21" class="rowColor">
+<tr id="i20" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#isEmpty--">isEmpty</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i22" class="altColor">
+<tr id="i21" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#isTagsPresent--">isTagsPresent</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i23" class="rowColor">
+<tr id="i22" class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#iterator--">iterator</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i24" class="altColor">
+<tr id="i23" class="rowColor">
 <td class="colFirst"><code>long</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#keySize--">keySize</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i25" class="rowColor">
+<tr id="i24" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#last--">last</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i26" class="altColor">
+<tr id="i25" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                        boolean&nbsp;forceCloneOfBigCell)</code>
@@ -329,7 +321,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
  otherwise the given cell is returned</div>
 </td>
 </tr>
-<tr id="i27" class="rowColor">
+<tr id="i26" class="altColor">
 <td class="colFirst"><code>protected <a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">CompositeImmutableSegment</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetOld,
           <a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetNew)</code>
@@ -337,22 +329,22 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
  immutable CellSet after its creation in immutable segment constructor</div>
 </td>
 </tr>
-<tr id="i28" class="altColor">
+<tr id="i27" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/io/TimeRange.html" title="class in org.apache.hadoop.hbase.io">TimeRange</a>&nbsp;tr,
           long&nbsp;oldestUnexpiredTS)</code>&nbsp;</td>
 </tr>
-<tr id="i29" class="rowColor">
+<tr id="i28" class="altColor">
 <td class="colFirst"><code>protected <a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstCell)</code>
 <div class="block">Returns a subset of the segment cell set, which starts with the given cell</div>
 </td>
 </tr>
-<tr id="i30" class="altColor">
+<tr id="i29" class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#toString--">toString</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i31" class="rowColor">
+<tr id="i30" class="altColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
               boolean&nbsp;succ,
@@ -557,34 +549,16 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 </dl>
 </li>
 </ul>
-<a name="getScanner-long-long-">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>getScanner</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.128">getScanner</a>(long&nbsp;readPoint,
-                                  long&nbsp;order)</pre>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-<dl>
-<dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
-<dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
-<dt><span class="returnLabel">Returns:</span></dt>
-<dd>a scanner for the given read point</dd>
-</dl>
-</li>
-</ul>
-<a name="getScanners-long-long-">
+<a name="getScanners-long-">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>getScanners</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.133">getScanners</a>(long&nbsp;readPoint,
-                                         long&nbsp;order)</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.125">getScanners</a>(long&nbsp;readPoint)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
-<dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
+<dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
 </dl>
 </li>
 </ul>
@@ -594,7 +568,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>isTagsPresent</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.140">isTagsPresent</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.132">isTagsPresent</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -607,7 +581,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>incScannerCount</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.148">incScannerCount</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.140">incScannerCount</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -620,7 +594,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>decScannerCount</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.153">decScannerCount</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.145">decScannerCount</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -633,7 +607,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>setCellSet</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">CompositeImmutableSegment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.163">setCellSet</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetOld,
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">CompositeImmutableSegment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.155">setCellSet</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetOld,
                                                <a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;cellSetNew)</pre>
 <div class="block">Setting the CellSet of the segment - used only for flat immutable segment for setting
  immutable CellSet after its creation in immutable segment constructor</div>
@@ -651,7 +625,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>indexEntrySize</h4>
-<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.169">indexEntrySize</a>()</pre>
+<pre>protected&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.161">indexEntrySize</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntrySize--">indexEntrySize</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -664,7 +638,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>canBeFlattened</h4>
-<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.173">canBeFlattened</a>()</pre>
+<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.165">canBeFlattened</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html#canBeFlattened--">canBeFlattened</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">ImmutableSegment</a></code></dd>
@@ -677,7 +651,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>keySize</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.181">keySize</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.173">keySize</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -692,7 +666,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>heapSize</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.189">heapSize</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.181">heapSize</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -707,7 +681,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>incSize</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.201">incSize</a>(long&nbsp;delta,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.193">incSize</a>(long&nbsp;delta,
                        long&nbsp;heapOverhead,
                        long&nbsp;offHeapOverhead)</pre>
 <div class="block">Updates the heap size counter of the segment by the given delta</div>
@@ -723,7 +697,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>getMinSequenceId</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.206">getMinSequenceId</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.198">getMinSequenceId</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -736,7 +710,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>getTimeRangeTracker</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.html" title="class in org.apache.hadoop.hbase.regionserver">TimeRangeTracker</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.211">getTimeRangeTracker</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.html" title="class in org.apache.hadoop.hbase.regionserver">TimeRangeTracker</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.203">getTimeRangeTracker</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -749,7 +723,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>last</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.217">last</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.209">last</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -762,7 +736,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>iterator</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.222">iterator</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.214">iterator</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -775,7 +749,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>headSet</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.227">headSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstKeyOnRow)</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.219">headSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstKeyOnRow)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -788,7 +762,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>compare</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.232">compare</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.224">compare</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
                    <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;right)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
@@ -802,7 +776,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>compareRows</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.237">compareRows</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.229">compareRows</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;left,
                        <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;right)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
@@ -816,7 +790,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>getCellSet</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.245">getCellSet</a>()</pre>
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/CellSet.html" title="class in org.apache.hadoop.hbase.regionserver">CellSet</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.237">getCellSet</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></dd>
@@ -831,7 +805,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>internalAdd</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.250">internalAdd</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.242">internalAdd</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                            boolean&nbsp;mslabUsed,
                            <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 <dl>
@@ -846,7 +820,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>updateMetaInfo</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.255">updateMetaInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.247">updateMetaInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cellToAdd,
                               boolean&nbsp;succ,
                               boolean&nbsp;mslabUsed,
                               <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
@@ -862,7 +836,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>tailSet</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.266">tailSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstCell)</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.258">tailSet</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;firstCell)</pre>
 <div class="block">Returns a subset of the segment cell set, which starts with the given cell</div>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
@@ -880,7 +854,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockList">
 <li class="blockList">
 <h4>dump</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.275">dump</a>(org.slf4j.Logger&nbsp;log)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.267">dump</a>(org.slf4j.Logger&nbsp;log)</pre>
 <div class="block">Dumps all cells of the segment into the given log</div>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
@@ -894,7 +868,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <ul class="blockListLast">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.282">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#line.274">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html#toString--">toString</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">ImmutableSegment</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
index 5435287..b9a50c8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
@@ -284,7 +284,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html" title="class in org.apache.hadoop.hbase.regionserver">AbstractMemStore</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-java.lang.Iterable-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#clearSnapshot-long-">clearSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionser
 ver/AbstractMemStore.html#getActive--">getActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getConfiguration--">getConfiguration</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getLowest-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">getLowest</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getNextRow-org.apache.hadoop.hbase.Cell-java.util.NavigableSet-">getNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshot--">getSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSize--">getSnapshotSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSizing--">getSnapshotSizing</a>, <a
  href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#resetActive--">resetActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#timeOfOldestEdit--">timeOfOldestEdit</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#toString--">toString</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#upsert-java.lang.Iterable-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">upsert</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#add-java.lang.Iterable-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">add</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">addToScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#clearSnapshot-long-">clearSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Abstra
 ctMemStore.html#getActive--">getActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getConfiguration--">getConfiguration</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getLowest-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">getLowest</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getNextRow-org.apache.hadoop.hbase.Cell-java.util.NavigableSet-">getNextRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshot--">getSnapshot</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSize--">getSnapshotSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#getSnapshotSizing--">getSnapshotSizing</a>, <a href="../
 ../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#resetActive--">resetActive</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#timeOfOldestEdit--">timeOfOldestEdit</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#toString--">toString</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#upsert-java.lang.Iterable-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">upsert</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
@@ -465,7 +465,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getSegments</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.135">getSegments</a>()
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.134">getSegments</a>()
                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -483,7 +483,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>getNextRow</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.147">getNextRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.146">getNextRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
 <dl>
 <dt><span class="paramLabel">Parameters:</span></dt>
 <dd><code>cell</code> - Find the row that comes after this one.  If null, we return the
@@ -499,7 +499,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>updateLowestUnflushedSequenceIdInWAL</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.153">updateLowestUnflushedSequenceIdInWAL</a>(boolean&nbsp;onlyIfMoreRecent)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.152">updateLowestUnflushedSequenceIdInWAL</a>(boolean&nbsp;onlyIfMoreRecent)</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#updateLowestUnflushedSequenceIdInWAL-boolean-">AbstractMemStore</a></code></span></div>
 <div class="block">Updates the wal with the lowest sequence id (oldest entry) that is still in memory</div>
 <dl>
@@ -517,7 +517,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>size</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSize.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSize</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.157">size</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSize.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSize</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.156">size</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>Total memory occupied by this MemStore. This won't include any size occupied by the
@@ -533,7 +533,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>checkActiveSize</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.166">checkActiveSize</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.165">checkActiveSize</a>()</pre>
 <div class="block">Check whether anything need to be done based on the current active set size
  Nothing need to be done for the DefaultMemStore</div>
 <dl>
@@ -548,7 +548,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>preFlushSeqIDEstimation</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.171">preFlushSeqIDEstimation</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.170">preFlushSeqIDEstimation</a>()</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#preFlushSeqIDEstimation--">MemStore</a></code></span></div>
 <div class="block">This method is called before the flush is executed.</div>
 <dl>
@@ -564,7 +564,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockList">
 <li class="blockList">
 <h4>isSloppy</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.175">isSloppy</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.174">isSloppy</a>()</pre>
 </li>
 </ul>
 <a name="main-java.lang.String:A-">
@@ -573,7 +573,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMem
 <ul class="blockListLast">
 <li class="blockList">
 <h4>main</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.186">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</pre>
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#line.185">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</pre>
 <div class="block">Code to help figure if our approximation of object heap sizes is close
  enough.  See hbase-900.  Fills memstores then waits so user can heap
  dump and bring up resultant hprof in something like jprofiler which

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
index bbfc7c7..b7b243d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
@@ -236,7 +236,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.htm
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache
 .hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntrySize--">indexEntrySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cel
 l-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChan
 ge</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--
 ">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntrySize--">indexEntrySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/ha
 doop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.r
 egionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">


[05/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="source

<TRUNCATED>

[07/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed to create lock file

<TRUNCATED>

[34/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
index 738622a..00caf7f 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -145,26 +145,16 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#current">current</a></span></code>&nbsp;</td>
 </tr>
 <tr class="altColor">
-<td class="colFirst"><code>private static long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#DEFAULT_SCANNER_ORDER">DEFAULT_SCANNER_ORDER</a></span></code>&nbsp;</td>
-</tr>
-<tr class="rowColor">
 <td class="colFirst"><code>protected <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#iter">iter</a></span></code>&nbsp;</td>
 </tr>
-<tr class="altColor">
+<tr class="rowColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#last">last</a></span></code>&nbsp;</td>
 </tr>
-<tr class="rowColor">
-<td class="colFirst"><code>private long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#readPoint">readPoint</a></span></code>&nbsp;</td>
-</tr>
 <tr class="altColor">
 <td class="colFirst"><code>private long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#scannerOrder">scannerOrder</a></span></code>
-<div class="block">Order of this scanner relative to other scanners.</div>
-</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#readPoint">readPoint</a></span></code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><code>protected <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></td>
@@ -199,13 +189,9 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <tr class="altColor">
 <td class="colFirst"><code>protected </code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-">SegmentScanner</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
-              long&nbsp;readPoint)</code>&nbsp;</td>
-</tr>
-<tr class="rowColor">
-<td class="colFirst"><code>protected </code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-long-">SegmentScanner</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
-              long&nbsp;readPoint,
-              long&nbsp;scannerOrder)</code>&nbsp;</td>
+              long&nbsp;readPoint)</code>
+<div class="block">Scanners are ordered from 0 (oldest) to newest in increasing order.</div>
+</td>
 </tr>
 </table>
 </li>
@@ -265,34 +251,28 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#getNextIndexedKey--">getNextIndexedKey</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i7" class="rowColor">
-<td class="colFirst"><code>long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#getScannerOrder--">getScannerOrder</a></span>()</code>
-<div class="block">Get the order of this KeyValueScanner.</div>
-</td>
-</tr>
-<tr id="i8" class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#getSegment--">getSegment</a></span>()</code>
 <div class="block">Private Methods</div>
 </td>
 </tr>
-<tr id="i9" class="rowColor">
+<tr id="i8" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#isFileScanner--">isFileScanner</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i10" class="altColor">
+<tr id="i9" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#next--">next</a></span>()</code>
 <div class="block">Return the next Cell in this scanner, iterating the scanner</div>
 </td>
 </tr>
-<tr id="i11" class="rowColor">
+<tr id="i10" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#peek--">peek</a></span>()</code>
 <div class="block">Look at the next Cell in this scanner, but do not iterate the scanner</div>
 </td>
 </tr>
-<tr id="i12" class="altColor">
+<tr id="i11" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#realSeekDone--">realSeekDone</a></span>()</code>
 <div class="block">This scanner is working solely on the in-memory MemStore and doesn't work on
@@ -300,7 +280,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
  therefore always returning true.</div>
 </td>
 </tr>
-<tr id="i13" class="rowColor">
+<tr id="i12" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#requestSeek-org.apache.hadoop.hbase.Cell-boolean-boolean-">requestSeek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;c,
            boolean&nbsp;forward,
@@ -310,38 +290,38 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
  row/column combination specified by the kv parameter.</div>
 </td>
 </tr>
-<tr id="i14" class="altColor">
+<tr id="i13" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#reseek-org.apache.hadoop.hbase.Cell-">reseek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>
 <div class="block">Reseek the scanner at or after the specified KeyValue.</div>
 </td>
 </tr>
-<tr id="i15" class="rowColor">
+<tr id="i14" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#seek-org.apache.hadoop.hbase.Cell-">seek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>
 <div class="block">Seek the scanner at or after the specified Cell.</div>
 </td>
 </tr>
-<tr id="i16" class="altColor">
+<tr id="i15" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#seekToLastRow--">seekToLastRow</a></span>()</code>
 <div class="block">Seek the scanner at the first KeyValue of last row</div>
 </td>
 </tr>
-<tr id="i17" class="rowColor">
+<tr id="i16" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#seekToPreviousRow-org.apache.hadoop.hbase.Cell-">seekToPreviousRow</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>
 <div class="block">Seek the scanner at the first Cell of the row which is the previous row
  of specified key</div>
 </td>
 </tr>
-<tr id="i18" class="altColor">
+<tr id="i17" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#shipped--">shipped</a></span>()</code>
 <div class="block">Called after a batch of rows scanned (RPC) and set to be returned to client.</div>
 </td>
 </tr>
-<tr id="i19" class="rowColor">
+<tr id="i18" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></span>(<a href="../../../../../org/apache/hadoop/hbase/client/Scan.html" title="class in org.apache.hadoop.hbase.client">Scan</a>&nbsp;scan,
                 <a href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html" title="class in org.apache.hadoop.hbase.regionserver">HStore</a>&nbsp;store,
@@ -350,11 +330,11 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
  MemStoreScanner, currently returns true as default.</div>
 </td>
 </tr>
-<tr id="i20" class="altColor">
+<tr id="i19" class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#toString--">toString</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i21" class="rowColor">
+<tr id="i20" class="altColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/SegmentScanner.html#updateCurrent--">updateCurrent</a></span>()</code>
 <div class="block">Private internal method for iterating over the segment,
@@ -369,6 +349,13 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <h3>Methods inherited from class&nbsp;java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></h3>
 <code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--" title="class or interface in java.lang">clone</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-" title="class or interface in java.lang">equals</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--" title="class or interface in java.lang">finalize</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--" title="class or interface in java.lang">getClass</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--" title="class or interface in java.lang">hashCode</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--" title="class or interface in java.lang">notify</a>, <a href="https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--" title="class or interface in java.lang">wait</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-" title="class or interface in java.lang">wait</a>, <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-" title="class or interface in java.lang">wait</a></code></li>
 </ul>
+<ul class="blockList">
+<li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.KeyValueScanner">
+<!--   -->
+</a>
+<h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></h3>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></code></li>
+</ul>
 </li>
 </ul>
 </li>
@@ -383,37 +370,13 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <!--   -->
 </a>
 <h3>Field Detail</h3>
-<a name="scannerOrder">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>scannerOrder</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.42">scannerOrder</a></pre>
-<div class="block">Order of this scanner relative to other scanners. See
- <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--"><code>KeyValueScanner.getScannerOrder()</code></a>.</div>
-</li>
-</ul>
-<a name="DEFAULT_SCANNER_ORDER">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>DEFAULT_SCANNER_ORDER</h4>
-<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.43">DEFAULT_SCANNER_ORDER</a></pre>
-<dl>
-<dt><span class="seeLabel">See Also:</span></dt>
-<dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.regionserver.SegmentScanner.DEFAULT_SCANNER_ORDER">Constant Field Values</a></dd>
-</dl>
-</li>
-</ul>
 <a name="segment">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>segment</h4>
-<pre>protected final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.46">segment</a></pre>
+<pre>protected final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.40">segment</a></pre>
 </li>
 </ul>
 <a name="readPoint">
@@ -422,7 +385,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>readPoint</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.48">readPoint</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.42">readPoint</a></pre>
 </li>
 </ul>
 <a name="iter">
@@ -431,7 +394,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>iter</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.51">iter</a></pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.45">iter</a></pre>
 </li>
 </ul>
 <a name="current">
@@ -440,7 +403,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>current</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.53">current</a></pre>
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.47">current</a></pre>
 </li>
 </ul>
 <a name="stopSkippingKVsIfNextRow">
@@ -449,7 +412,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>stopSkippingKVsIfNextRow</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.57">stopSkippingKVsIfNextRow</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.51">stopSkippingKVsIfNextRow</a></pre>
 </li>
 </ul>
 <a name="last">
@@ -458,7 +421,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>last</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.59">last</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.53">last</a></pre>
 </li>
 </ul>
 <a name="closed">
@@ -467,7 +430,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockListLast">
 <li class="blockList">
 <h4>closed</h4>
-<pre>protected&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.62">closed</a></pre>
+<pre>protected&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.56">closed</a></pre>
 </li>
 </ul>
 </li>
@@ -481,27 +444,12 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <a name="SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-">
 <!--   -->
 </a>
-<ul class="blockList">
-<li class="blockList">
-<h4>SegmentScanner</h4>
-<pre>protected&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.64">SegmentScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
-                         long&nbsp;readPoint)</pre>
-</li>
-</ul>
-<a name="SegmentScanner-org.apache.hadoop.hbase.regionserver.Segment-long-long-">
-<!--   -->
-</a>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>SegmentScanner</h4>
-<pre>protected&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.72">SegmentScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
-                         long&nbsp;readPoint,
-                         long&nbsp;scannerOrder)</pre>
-<dl>
-<dt><span class="paramLabel">Parameters:</span></dt>
-<dd><code>scannerOrder</code> - see <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--"><code>KeyValueScanner.getScannerOrder()</code></a>.
- Scanners are ordered from 0 (oldest) to newest in increasing order.</dd>
-</dl>
+<pre>protected&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.62">SegmentScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
+                         long&nbsp;readPoint)</pre>
+<div class="block">Scanners are ordered from 0 (oldest) to newest in increasing order.</div>
 </li>
 </ul>
 </li>
@@ -518,7 +466,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>peek</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.92">peek</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.81">peek</a>()</pre>
 <div class="block">Look at the next Cell in this scanner, but do not iterate the scanner</div>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -534,7 +482,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>next</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.108">next</a>()
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.97">next</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Return the next Cell in this scanner, iterating the scanner</div>
 <dl>
@@ -553,7 +501,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>seek</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.123">seek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.112">seek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)
              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Seek the scanner at or after the specified Cell.</div>
 <dl>
@@ -574,7 +522,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getIterator</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.139">getIterator</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true" title="class or interface in java.util">Iterator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.128">getIterator</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
 </li>
 </ul>
 <a name="reseek-org.apache.hadoop.hbase.Cell-">
@@ -583,7 +531,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>reseek</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.153">reseek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.142">reseek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)
                throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Reseek the scanner at or after the specified KeyValue.
  This method is guaranteed to seek at or after the required key only if the
@@ -607,7 +555,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>backwardSeek</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.181">backwardSeek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;key)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.170">backwardSeek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;key)
                      throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Seek the scanner at or before the row of specified Cell, it firstly
  tries to seek the scanner at or after the specified Cell, return if
@@ -632,7 +580,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>seekToPreviousRow</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.201">seekToPreviousRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.190">seekToPreviousRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)
                           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Seek the scanner at the first Cell of the row which is the previous row
  of specified key</div>
@@ -655,7 +603,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>seekToLastRow</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.237">seekToLastRow</a>()
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.226">seekToLastRow</a>()
                       throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Seek the scanner at the first KeyValue of last row</div>
 <dl>
@@ -668,34 +616,13 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 </dl>
 </li>
 </ul>
-<a name="getScannerOrder--">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>getScannerOrder</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.259">getScannerOrder</a>()</pre>
-<div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">KeyValueScanner</a></code></span></div>
-<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and
- MemStoreScanners (other scanners simply return 0). This is required for comparing multiple
- files to find out which one has the latest data. StoreFileScanners are ordered from 0
- (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always
- contains freshest data.</div>
-<dl>
-<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
-<dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
-<dt><span class="seeLabel">See Also:</span></dt>
-<dd><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--"><code>KeyValueScanner.getScannerOrder()</code></a></dd>
-</dl>
-</li>
-</ul>
 <a name="close--">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>close</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.267">close</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.249">close</a>()</pre>
 <div class="block">Close the KeyValue scanner.</div>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -713,7 +640,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>shouldUseScanner</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.282">shouldUseScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/client/Scan.html" title="class in org.apache.hadoop.hbase.client">Scan</a>&nbsp;scan,
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.264">shouldUseScanner</a>(<a href="../../../../../org/apache/hadoop/hbase/client/Scan.html" title="class in org.apache.hadoop.hbase.client">Scan</a>&nbsp;scan,
                                 <a href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html" title="class in org.apache.hadoop.hbase.regionserver">HStore</a>&nbsp;store,
                                 long&nbsp;oldestUnexpiredTS)</pre>
 <div class="block">This functionality should be resolved in the higher level which is
@@ -739,7 +666,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>requestSeek</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.288">requestSeek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;c,
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.270">requestSeek</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;c,
                            boolean&nbsp;forward,
                            boolean&nbsp;useBloom)
                     throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
@@ -766,7 +693,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>realSeekDone</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.299">realSeekDone</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.281">realSeekDone</a>()</pre>
 <div class="block">This scanner is working solely on the in-memory MemStore and doesn't work on
  store files, MutableCellSetSegmentScanner always does the seek,
  therefore always returning true.</div>
@@ -782,7 +709,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>enforceSeek</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.309">enforceSeek</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.291">enforceSeek</a>()
                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">This function should be never called on scanners that always do real seek operations (i.e. most
  of the scanners and also this one). The easiest way to achieve this is to call
@@ -801,7 +728,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>isFileScanner</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.317">isFileScanner</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.299">isFileScanner</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
@@ -816,7 +743,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getFilePath</h4>
-<pre>public&nbsp;org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.322">getFilePath</a>()</pre>
+<pre>public&nbsp;org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.304">getFilePath</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
@@ -833,7 +760,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getNextIndexedKey</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.332">getNextIndexedKey</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.314">getNextIndexedKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getNextIndexedKey--">getNextIndexedKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
@@ -850,7 +777,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>shipped</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.341">shipped</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.323">shipped</a>()
              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Called after a batch of rows scanned (RPC) and set to be returned to client. Any in between
  cleanup can be done here. Nothing to be done for MutableCellSetSegmentScanner.</div>
@@ -868,7 +795,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.347">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.329">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>
@@ -881,7 +808,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getSegment</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.356">getSegment</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.338">getSegment</a>()</pre>
 <div class="block">Private Methods</div>
 </li>
 </ul>
@@ -891,7 +818,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>updateCurrent</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.364">updateCurrent</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.346">updateCurrent</a>()</pre>
 <div class="block">Private internal method for iterating over the segment,
  skipping the cells with irrelevant MVCC</div>
 </li>
@@ -902,7 +829,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockListLast">
 <li class="blockList">
 <h4>getHighest</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.396">getHighest</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;first,
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html#line.378">getHighest</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;first,
                         <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;second)</pre>
 <div class="block">Private internal method that returns the higher of the two key values, or null
  if they are both null</div>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
index 05e3366..26dff49 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
@@ -899,11 +899,9 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <h4>getScannerOrder</h4>
 <pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html#line.369">getScannerOrder</a>()</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">KeyValueScanner</a></code></span></div>
-<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and
- MemStoreScanners (other scanners simply return 0). This is required for comparing multiple
- files to find out which one has the latest data. StoreFileScanners are ordered from 0
- (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always
- contains freshest data.</div>
+<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners.
+ This is required for comparing multiple files to find out which one has the latest
+ data. StoreFileScanners are ordered from 0 (oldest) to newest in increasing order.</div>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index 7370b94..101b23a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -504,93 +504,87 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getReadPoint--">getReadPoint</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i11" class="rowColor">
-<td class="colFirst"><code>long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#getScannerOrder--">getScannerOrder</a></span>()</code>
-<div class="block">Get the order of this KeyValueScanner.</div>
-</td>
-</tr>
-<tr id="i12" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#isScanUsePread--">isScanUsePread</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i13" class="rowColor">
+<tr id="i12" class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html" title="enum in org.apache.hadoop.hbase.regionserver">ScannerContext.NextState</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#needToReturn-java.util.List-">needToReturn</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;outResult)</code>
 <div class="block">If the top cell won't be flushed into disk, the new top cell may be
  changed after #reopenAfterFlush.</div>
 </td>
 </tr>
-<tr id="i14" class="altColor">
+<tr id="i13" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/KeyValue.html" title="class in org.apache.hadoop.hbase">KeyValue</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next--">next</a></span>()</code>
 <div class="block">Return the next Cell in this scanner, iterating the scanner</div>
 </td>
 </tr>
-<tr id="i15" class="rowColor">
+<tr id="i14" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#next-java.util.List-org.apache.hadoop.hbase.regionserver.ScannerContext-">next</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;outResult,
     <a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.html" title="class in org.apache.hadoop.hbase.regionserver">ScannerContext</a>&nbsp;scannerContext)</code>
 <div class="block">Get the next row of values from this Store.</div>
 </td>
 </tr>
-<tr id="i16" class="altColor">
+<tr id="i15" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#parallelSeek-java.util.List-org.apache.hadoop.hbase.Cell-">parallelSeek</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners,
             <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;kv)</code>
 <div class="block">Seek storefiles in parallel to optimize IO latency as much as possible</div>
 </td>
 </tr>
-<tr id="i17" class="rowColor">
+<tr id="i16" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#peek--">peek</a></span>()</code>
 <div class="block">Look at the next Cell in this scanner, but do not iterate scanner.</div>
 </td>
 </tr>
-<tr id="i18" class="altColor">
+<tr id="i17" class="rowColor">
 <td class="colFirst"><code>protected boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reopenAfterFlush--">reopenAfterFlush</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i19" class="rowColor">
+<tr id="i18" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#reseek-org.apache.hadoop.hbase.Cell-">reseek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;kv)</code>
 <div class="block">Reseek the scanner at or after the specified KeyValue.</div>
 </td>
 </tr>
-<tr id="i20" class="altColor">
+<tr id="i19" class="rowColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#resetKVHeap-java.util.List-org.apache.hadoop.hbase.CellComparator-">resetKVHeap</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners,
            <a href="../../../../../org/apache/hadoop/hbase/CellComparator.html" title="interface in org.apache.hadoop.hbase">CellComparator</a>&nbsp;comparator)</code>&nbsp;</td>
 </tr>
-<tr id="i21" class="rowColor">
+<tr id="i20" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#resetQueryMatcher-org.apache.hadoop.hbase.Cell-">resetQueryMatcher</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;lastTopKey)</code>&nbsp;</td>
 </tr>
-<tr id="i22" class="altColor">
+<tr id="i21" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seek-org.apache.hadoop.hbase.Cell-">seek</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;key)</code>
 <div class="block">Seek the scanner at or after the specified KeyValue.</div>
 </td>
 </tr>
-<tr id="i23" class="rowColor">
+<tr id="i22" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekAllScanner-org.apache.hadoop.hbase.regionserver.ScanInfo-java.util.List-">seekAllScanner</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScanInfo.html" title="class in org.apache.hadoop.hbase.regionserver">ScanInfo</a>&nbsp;scanInfo,
               <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
-<tr id="i24" class="altColor">
+<tr id="i23" class="rowColor">
 <td class="colFirst"><code>protected boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekAsDirection-org.apache.hadoop.hbase.Cell-">seekAsDirection</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;kv)</code>
 <div class="block">Do a reseek in a normal StoreScanner(scan forward)</div>
 </td>
 </tr>
-<tr id="i25" class="rowColor">
+<tr id="i24" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekOrSkipToNextColumn-org.apache.hadoop.hbase.Cell-">seekOrSkipToNextColumn</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>&nbsp;</td>
 </tr>
-<tr id="i26" class="altColor">
+<tr id="i25" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekOrSkipToNextRow-org.apache.hadoop.hbase.Cell-">seekOrSkipToNextRow</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>&nbsp;</td>
 </tr>
-<tr id="i27" class="rowColor">
+<tr id="i26" class="altColor">
 <td class="colFirst"><code>protected void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekScanners-java.util.List-org.apache.hadoop.hbase.Cell-boolean-boolean-">seekScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners,
             <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;seekKey,
@@ -599,40 +593,40 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <div class="block">Seek the specified scanners with the given key</div>
 </td>
 </tr>
-<tr id="i28" class="altColor">
+<tr id="i27" class="rowColor">
 <td class="colFirst"><code>protected boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#seekToNextRow-org.apache.hadoop.hbase.Cell-">seekToNextRow</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;c)</code>&nbsp;</td>
 </tr>
-<tr id="i29" class="rowColor">
+<tr id="i28" class="altColor">
 <td class="colFirst"><code>protected <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#selectScannersFrom-org.apache.hadoop.hbase.regionserver.HStore-java.util.List-">selectScannersFrom</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html" title="class in org.apache.hadoop.hbase.regionserver">HStore</a>&nbsp;store,
                   <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;allScanners)</code>
 <div class="block">Filters the given list of scanners using Bloom filter, time range, and TTL.</div>
 </td>
 </tr>
-<tr id="i30" class="altColor">
+<tr id="i29" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#shipped--">shipped</a></span>()</code>
 <div class="block">Called after a batch of rows scanned and set to be returned to client.</div>
 </td>
 </tr>
-<tr id="i31" class="rowColor">
+<tr id="i30" class="altColor">
 <td class="colFirst"><code>protected boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextColumn-org.apache.hadoop.hbase.Cell-">trySkipToNextColumn</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>
 <div class="block">See <a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Cell-"><code>trySkipToNextRow(Cell)</code></a></div>
 </td>
 </tr>
-<tr id="i32" class="altColor">
+<tr id="i31" class="rowColor">
 <td class="colFirst"><code>protected boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySkipToNextRow-org.apache.hadoop.hbase.Cell-">trySkipToNextRow</a></span>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</code>
 <div class="block">See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109).</div>
 </td>
 </tr>
-<tr id="i33" class="rowColor">
+<tr id="i32" class="altColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#trySwitchToStreamRead--">trySwitchToStreamRead</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i34" class="altColor">
+<tr id="i33" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#updateReaders-java.util.List-java.util.List-">updateReaders</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/HStoreFile.html" title="class in org.apache.hadoop.hbase.regionserver">HStoreFile</a>&gt;&nbsp;sfs,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;memStoreScanners)</code>
@@ -666,7 +660,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <!--   -->
 </a>
 <h3>Methods inherited from interface&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#backwardSeek-org.apache.hadoop.hbase.Cell-">backwardSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#requestSeek-org.apache.hadoop.hbase.Cell-boolean-boolean-">requestSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToLastRow--">seekToLastRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToPreviousRow-org.apach
 e.hadoop.hbase.Cell-">seekToPreviousRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#backwardSeek-org.apache.hadoop.hbase.Cell-">backwardSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#enforceSeek--">enforceSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getFilePath--">getFilePath</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#isFileScanner--">isFileScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#realSeekDone--">realSeekDone</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#requestSeek-org.apache.hadoop.hbase.Cell-boolean-boolean-">requestSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToLastRow--">seekTo
 LastRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#seekToPreviousRow-org.apache.hadoop.hbase.Cell-">seekToPreviousRow</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#shouldUseScanner-org.apache.hadoop.hbase.client.Scan-org.apache.hadoop.hbase.regionserver.HStore-long-">shouldUseScanner</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.org.apache.hadoop.hbase.regionserver.InternalScanner">
@@ -1776,34 +1770,13 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <pre>protected final&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1030">checkFlushed</a>()</pre>
 </li>
 </ul>
-<a name="getScannerOrder--">
-<!--   -->
-</a>
-<ul class="blockList">
-<li class="blockList">
-<h4>getScannerOrder</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1053">getScannerOrder</a>()</pre>
-<div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">KeyValueScanner</a></code></span></div>
-<div class="block">Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and
- MemStoreScanners (other scanners simply return 0). This is required for comparing multiple
- files to find out which one has the latest data. StoreFileScanners are ordered from 0
- (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always
- contains freshest data.</div>
-<dl>
-<dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
-<dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--">getScannerOrder</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
-<dt><span class="seeLabel">See Also:</span></dt>
-<dd><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getScannerOrder--"><code>KeyValueScanner.getScannerOrder()</code></a></dd>
-</dl>
-</li>
-</ul>
 <a name="parallelSeek-java.util.List-org.apache.hadoop.hbase.Cell-">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>parallelSeek</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1063">parallelSeek</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners,
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1056">parallelSeek</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners,
                           <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;kv)
                    throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Seek storefiles in parallel to optimize IO latency as much as possible</div>
@@ -1822,7 +1795,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getAllScannersForTesting</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1099">getAllScannersForTesting</a>()</pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1092">getAllScannersForTesting</a>()</pre>
 <div class="block">Used in testing.</div>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
@@ -1836,7 +1809,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>enableLazySeekGlobally</h4>
-<pre>static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1109">enableLazySeekGlobally</a>(boolean&nbsp;enable)</pre>
+<pre>static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1102">enableLazySeekGlobally</a>(boolean&nbsp;enable)</pre>
 </li>
 </ul>
 <a name="getEstimatedNumberOfKvsScanned--">
@@ -1845,7 +1818,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getEstimatedNumberOfKvsScanned</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1116">getEstimatedNumberOfKvsScanned</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1109">getEstimatedNumberOfKvsScanned</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>The estimated number of KVs seen by this scanner (includes some skipped KVs).</dd>
@@ -1858,7 +1831,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockList">
 <li class="blockList">
 <h4>getNextIndexedKey</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1121">getNextIndexedKey</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1114">getNextIndexedKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html#getNextIndexedKey--">getNextIndexedKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></dd>
@@ -1877,7 +1850,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValue
 <ul class="blockListLast">
 <li class="blockList">
 <h4>shipped</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1126">shipped</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html#line.1119">shipped</a>()
              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Shipper.html#shipped--">Shipper</a></code></span></div>
 <div class="block">Called after a batch of rows scanned and set to be returned to client. Any in between cleanup

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
index 095177c..14aac7f 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
@@ -272,20 +272,6 @@
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><code><a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></td>
-<td class="colLast"><span class="typeNameLabel">CompositeImmutableSegment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanner-long-long-">getScanner</a></span>(long&nbsp;readPoint,
-          long&nbsp;order)</code>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-</td>
-</tr>
-<tr class="altColor">
-<td class="colFirst"><code><a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></td>
-<td class="colLast"><span class="typeNameLabel">Segment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a></span>(long&nbsp;readPoint,
-          long&nbsp;order)</code>
-<div class="block">Creates the scanner for the given read point, and a specific order in a list</div>
-</td>
-</tr>
-<tr class="rowColor">
-<td class="colFirst"><code><a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a></code></td>
 <td class="colLast"><span class="typeNameLabel">HStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#getScanner-org.apache.hadoop.hbase.client.Scan-java.util.NavigableSet-long-">getScanner</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/client/Scan.html" title="class in org.apache.hadoop.hbase.client">Scan</a>&nbsp;scan,
           <a href="https://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true" title="class or interface in java.util">NavigableSet</a>&lt;byte[]&gt;&nbsp;targetCols,
           long&nbsp;readPt)</code>
@@ -388,25 +374,23 @@
 </tr>
 <tr class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><span class="typeNameLabel">MemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPt)</code>&nbsp;</td>
+<td class="colLast"><span class="typeNameLabel">CompositeImmutableSegment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPoint)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><span class="typeNameLabel">CompactingMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPt)</code>&nbsp;</td>
+<td class="colLast"><span class="typeNameLabel">Segment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPoint)</code>&nbsp;</td>
 </tr>
 <tr class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><span class="typeNameLabel">DefaultMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPt)</code>&nbsp;</td>
+<td class="colLast"><span class="typeNameLabel">MemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPt)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><span class="typeNameLabel">CompositeImmutableSegment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html#getScanners-long-long-">getScanners</a></span>(long&nbsp;readPoint,
-           long&nbsp;order)</code>&nbsp;</td>
+<td class="colLast"><span class="typeNameLabel">CompactingMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPt)</code>&nbsp;</td>
 </tr>
 <tr class="altColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
-<td class="colLast"><span class="typeNameLabel">Segment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a></span>(long&nbsp;readPoint,
-           long&nbsp;order)</code>&nbsp;</td>
+<td class="colLast"><span class="typeNameLabel">DefaultMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/DefaultMemStore.html#getScanners-long-">getScanners</a></span>(long&nbsp;readPt)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><code><a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;</code></td>
@@ -484,17 +468,15 @@
 <td class="colLast"><span class="typeNameLabel">StoreScanner.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/StoreScanner.html#addCurrentScanners-java.util.List-">addCurrentScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
-<td class="colFirst"><code>static long</code></td>
-<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-long-java.util.List-">addToScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
+<td class="colFirst"><code>static void</code></td>
+<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-java.util.List-">addToScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
              long&nbsp;readPt,
-             long&nbsp;order,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr class="altColor">
-<td class="colFirst"><code>protected static long</code></td>
-<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">addToScanners</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
+<td class="colFirst"><code>protected static void</code></td>
+<td class="colLast"><span class="typeNameLabel">AbstractMemStore.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">addToScanners</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
              long&nbsp;readPt,
-             long&nbsp;order,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr class="rowColor">


[17/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed

<TRUNCATED>

[26/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
index 22e7059..01b8a09 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
@@ -362,235 +362,231 @@
 <span class="sourceLineNo">354</span>  }<a name="line.354"></a>
 <span class="sourceLineNo">355</span><a name="line.355"></a>
 <span class="sourceLineNo">356</span>  @Override<a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /*<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Scanners are ordered from 0 (oldest) to newest in increasing order.<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   */<a name="line.359"></a>
-<span class="sourceLineNo">360</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    MutableSegment activeTmp = active;<a name="line.361"></a>
-<span class="sourceLineNo">362</span>    List&lt;? extends Segment&gt; pipelineList = pipeline.getSegments();<a name="line.362"></a>
-<span class="sourceLineNo">363</span>    List&lt;? extends Segment&gt; snapshotList = snapshot.getAllSegments();<a name="line.363"></a>
-<span class="sourceLineNo">364</span>    long order = 1L + pipelineList.size() + snapshotList.size();<a name="line.364"></a>
-<span class="sourceLineNo">365</span>    // The list of elements in pipeline + the active element + the snapshot segment<a name="line.365"></a>
-<span class="sourceLineNo">366</span>    // The order is the Segment ordinal<a name="line.366"></a>
-<span class="sourceLineNo">367</span>    List&lt;KeyValueScanner&gt; list = createList((int) order);<a name="line.367"></a>
-<span class="sourceLineNo">368</span>    order = addToScanners(activeTmp, readPt, order, list);<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    order = addToScanners(pipelineList, readPt, order, list);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    addToScanners(snapshotList, readPt, order, list);<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    return list;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>  }<a name="line.372"></a>
-<span class="sourceLineNo">373</span><a name="line.373"></a>
-<span class="sourceLineNo">374</span>   @VisibleForTesting<a name="line.374"></a>
-<span class="sourceLineNo">375</span>   protected List&lt;KeyValueScanner&gt; createList(int capacity) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>     return new ArrayList&lt;&gt;(capacity);<a name="line.376"></a>
-<span class="sourceLineNo">377</span>   }<a name="line.377"></a>
-<span class="sourceLineNo">378</span><a name="line.378"></a>
-<span class="sourceLineNo">379</span>  /**<a name="line.379"></a>
-<span class="sourceLineNo">380</span>   * Check whether anything need to be done based on the current active set size.<a name="line.380"></a>
-<span class="sourceLineNo">381</span>   * The method is invoked upon every addition to the active set.<a name="line.381"></a>
-<span class="sourceLineNo">382</span>   * For CompactingMemStore, flush the active set to the read-only memory if it's<a name="line.382"></a>
-<span class="sourceLineNo">383</span>   * size is above threshold<a name="line.383"></a>
-<span class="sourceLineNo">384</span>   */<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  @Override<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  protected void checkActiveSize() {<a name="line.386"></a>
-<span class="sourceLineNo">387</span>    if (shouldFlushInMemory()) {<a name="line.387"></a>
-<span class="sourceLineNo">388</span>      /* The thread is dispatched to flush-in-memory. This cannot be done<a name="line.388"></a>
-<span class="sourceLineNo">389</span>      * on the same thread, because for flush-in-memory we require updatesLock<a name="line.389"></a>
-<span class="sourceLineNo">390</span>      * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      * in the shared mode. */<a name="line.391"></a>
-<span class="sourceLineNo">392</span>      InMemoryFlushRunnable runnable = new InMemoryFlushRunnable();<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      if (LOG.isTraceEnabled()) {<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        LOG.trace(<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          "Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());<a name="line.395"></a>
-<span class="sourceLineNo">396</span>      }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      getPool().execute(runnable);<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span>  }<a name="line.399"></a>
-<span class="sourceLineNo">400</span><a name="line.400"></a>
-<span class="sourceLineNo">401</span>  // internally used method, externally visible only for tests<a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  // otherwise there is a deadlock<a name="line.403"></a>
-<span class="sourceLineNo">404</span>  @VisibleForTesting<a name="line.404"></a>
-<span class="sourceLineNo">405</span>  void flushInMemory() throws IOException {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>    // setting the inMemoryFlushInProgress flag again for the case this method is invoked<a name="line.406"></a>
-<span class="sourceLineNo">407</span>    // directly (only in tests) in the common path setting from true to true is idempotent<a name="line.407"></a>
-<span class="sourceLineNo">408</span>    inMemoryFlushInProgress.set(true);<a name="line.408"></a>
-<span class="sourceLineNo">409</span>    try {<a name="line.409"></a>
-<span class="sourceLineNo">410</span>      // Phase I: Update the pipeline<a name="line.410"></a>
-<span class="sourceLineNo">411</span>      getRegionServices().blockUpdates();<a name="line.411"></a>
-<span class="sourceLineNo">412</span>      try {<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");<a name="line.413"></a>
-<span class="sourceLineNo">414</span>        pushActiveToPipeline(this.active);<a name="line.414"></a>
-<span class="sourceLineNo">415</span>      } finally {<a name="line.415"></a>
-<span class="sourceLineNo">416</span>        getRegionServices().unblockUpdates();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>      }<a name="line.417"></a>
-<span class="sourceLineNo">418</span><a name="line.418"></a>
-<span class="sourceLineNo">419</span>      // Used by tests<a name="line.419"></a>
-<span class="sourceLineNo">420</span>      if (!allowCompaction.get()) {<a name="line.420"></a>
-<span class="sourceLineNo">421</span>        return;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      // Phase II: Compact the pipeline<a name="line.423"></a>
-<span class="sourceLineNo">424</span>      try {<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        // Speculative compaction execution, may be interrupted if flush is forced while<a name="line.425"></a>
-<span class="sourceLineNo">426</span>        // compaction is in progress<a name="line.426"></a>
-<span class="sourceLineNo">427</span>        compactor.start();<a name="line.427"></a>
-<span class="sourceLineNo">428</span>      } catch (IOException e) {<a name="line.428"></a>
-<span class="sourceLineNo">429</span>        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",<a name="line.429"></a>
-<span class="sourceLineNo">430</span>            getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>      }<a name="line.431"></a>
-<span class="sourceLineNo">432</span>    } finally {<a name="line.432"></a>
-<span class="sourceLineNo">433</span>      inMemoryFlushInProgress.set(false);<a name="line.433"></a>
-<span class="sourceLineNo">434</span>      LOG.trace("IN-MEMORY FLUSH: end");<a name="line.434"></a>
-<span class="sourceLineNo">435</span>    }<a name="line.435"></a>
-<span class="sourceLineNo">436</span>  }<a name="line.436"></a>
-<span class="sourceLineNo">437</span><a name="line.437"></a>
-<span class="sourceLineNo">438</span>  private Segment getLastSegment() {<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    Segment localActive = getActive();<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    Segment tail = pipeline.getTail();<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    return tail == null ? localActive : tail;<a name="line.441"></a>
+<span class="sourceLineNo">357</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    MutableSegment activeTmp = active;<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    List&lt;? extends Segment&gt; pipelineList = pipeline.getSegments();<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    List&lt;? extends Segment&gt; snapshotList = snapshot.getAllSegments();<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    long numberOfSegments = 1L + pipelineList.size() + snapshotList.size();<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    // The list of elements in pipeline + the active element + the snapshot segment<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    List&lt;KeyValueScanner&gt; list = createList((int) numberOfSegments);<a name="line.363"></a>
+<span class="sourceLineNo">364</span>    addToScanners(activeTmp, readPt, list);<a name="line.364"></a>
+<span class="sourceLineNo">365</span>    addToScanners(pipelineList, readPt, list);<a name="line.365"></a>
+<span class="sourceLineNo">366</span>    addToScanners(snapshotList, readPt, list);<a name="line.366"></a>
+<span class="sourceLineNo">367</span>    return list;<a name="line.367"></a>
+<span class="sourceLineNo">368</span>  }<a name="line.368"></a>
+<span class="sourceLineNo">369</span><a name="line.369"></a>
+<span class="sourceLineNo">370</span>   @VisibleForTesting<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   protected List&lt;KeyValueScanner&gt; createList(int capacity) {<a name="line.371"></a>
+<span class="sourceLineNo">372</span>     return new ArrayList&lt;&gt;(capacity);<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   }<a name="line.373"></a>
+<span class="sourceLineNo">374</span><a name="line.374"></a>
+<span class="sourceLineNo">375</span>  /**<a name="line.375"></a>
+<span class="sourceLineNo">376</span>   * Check whether anything need to be done based on the current active set size.<a name="line.376"></a>
+<span class="sourceLineNo">377</span>   * The method is invoked upon every addition to the active set.<a name="line.377"></a>
+<span class="sourceLineNo">378</span>   * For CompactingMemStore, flush the active set to the read-only memory if it's<a name="line.378"></a>
+<span class="sourceLineNo">379</span>   * size is above threshold<a name="line.379"></a>
+<span class="sourceLineNo">380</span>   */<a name="line.380"></a>
+<span class="sourceLineNo">381</span>  @Override<a name="line.381"></a>
+<span class="sourceLineNo">382</span>  protected void checkActiveSize() {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    if (shouldFlushInMemory()) {<a name="line.383"></a>
+<span class="sourceLineNo">384</span>      /* The thread is dispatched to flush-in-memory. This cannot be done<a name="line.384"></a>
+<span class="sourceLineNo">385</span>      * on the same thread, because for flush-in-memory we require updatesLock<a name="line.385"></a>
+<span class="sourceLineNo">386</span>      * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      * in the shared mode. */<a name="line.387"></a>
+<span class="sourceLineNo">388</span>      InMemoryFlushRunnable runnable = new InMemoryFlushRunnable();<a name="line.388"></a>
+<span class="sourceLineNo">389</span>      if (LOG.isTraceEnabled()) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>        LOG.trace(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>          "Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());<a name="line.391"></a>
+<span class="sourceLineNo">392</span>      }<a name="line.392"></a>
+<span class="sourceLineNo">393</span>      getPool().execute(runnable);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
+<span class="sourceLineNo">395</span>  }<a name="line.395"></a>
+<span class="sourceLineNo">396</span><a name="line.396"></a>
+<span class="sourceLineNo">397</span>  // internally used method, externally visible only for tests<a name="line.397"></a>
+<span class="sourceLineNo">398</span>  // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,<a name="line.398"></a>
+<span class="sourceLineNo">399</span>  // otherwise there is a deadlock<a name="line.399"></a>
+<span class="sourceLineNo">400</span>  @VisibleForTesting<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  void flushInMemory() throws IOException {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    // setting the inMemoryFlushInProgress flag again for the case this method is invoked<a name="line.402"></a>
+<span class="sourceLineNo">403</span>    // directly (only in tests) in the common path setting from true to true is idempotent<a name="line.403"></a>
+<span class="sourceLineNo">404</span>    inMemoryFlushInProgress.set(true);<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    try {<a name="line.405"></a>
+<span class="sourceLineNo">406</span>      // Phase I: Update the pipeline<a name="line.406"></a>
+<span class="sourceLineNo">407</span>      getRegionServices().blockUpdates();<a name="line.407"></a>
+<span class="sourceLineNo">408</span>      try {<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");<a name="line.409"></a>
+<span class="sourceLineNo">410</span>        pushActiveToPipeline(this.active);<a name="line.410"></a>
+<span class="sourceLineNo">411</span>      } finally {<a name="line.411"></a>
+<span class="sourceLineNo">412</span>        getRegionServices().unblockUpdates();<a name="line.412"></a>
+<span class="sourceLineNo">413</span>      }<a name="line.413"></a>
+<span class="sourceLineNo">414</span><a name="line.414"></a>
+<span class="sourceLineNo">415</span>      // Used by tests<a name="line.415"></a>
+<span class="sourceLineNo">416</span>      if (!allowCompaction.get()) {<a name="line.416"></a>
+<span class="sourceLineNo">417</span>        return;<a name="line.417"></a>
+<span class="sourceLineNo">418</span>      }<a name="line.418"></a>
+<span class="sourceLineNo">419</span>      // Phase II: Compact the pipeline<a name="line.419"></a>
+<span class="sourceLineNo">420</span>      try {<a name="line.420"></a>
+<span class="sourceLineNo">421</span>        // Speculative compaction execution, may be interrupted if flush is forced while<a name="line.421"></a>
+<span class="sourceLineNo">422</span>        // compaction is in progress<a name="line.422"></a>
+<span class="sourceLineNo">423</span>        compactor.start();<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      } catch (IOException e) {<a name="line.424"></a>
+<span class="sourceLineNo">425</span>        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",<a name="line.425"></a>
+<span class="sourceLineNo">426</span>            getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e);<a name="line.426"></a>
+<span class="sourceLineNo">427</span>      }<a name="line.427"></a>
+<span class="sourceLineNo">428</span>    } finally {<a name="line.428"></a>
+<span class="sourceLineNo">429</span>      inMemoryFlushInProgress.set(false);<a name="line.429"></a>
+<span class="sourceLineNo">430</span>      LOG.trace("IN-MEMORY FLUSH: end");<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    }<a name="line.431"></a>
+<span class="sourceLineNo">432</span>  }<a name="line.432"></a>
+<span class="sourceLineNo">433</span><a name="line.433"></a>
+<span class="sourceLineNo">434</span>  private Segment getLastSegment() {<a name="line.434"></a>
+<span class="sourceLineNo">435</span>    Segment localActive = getActive();<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    Segment tail = pipeline.getTail();<a name="line.436"></a>
+<span class="sourceLineNo">437</span>    return tail == null ? localActive : tail;<a name="line.437"></a>
+<span class="sourceLineNo">438</span>  }<a name="line.438"></a>
+<span class="sourceLineNo">439</span><a name="line.439"></a>
+<span class="sourceLineNo">440</span>  private byte[] getFamilyNameInBytes() {<a name="line.440"></a>
+<span class="sourceLineNo">441</span>    return store.getColumnFamilyDescriptor().getName();<a name="line.441"></a>
 <span class="sourceLineNo">442</span>  }<a name="line.442"></a>
 <span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>  private byte[] getFamilyNameInBytes() {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>    return store.getColumnFamilyDescriptor().getName();<a name="line.445"></a>
+<span class="sourceLineNo">444</span>  private ThreadPoolExecutor getPool() {<a name="line.444"></a>
+<span class="sourceLineNo">445</span>    return getRegionServices().getInMemoryCompactionPool();<a name="line.445"></a>
 <span class="sourceLineNo">446</span>  }<a name="line.446"></a>
 <span class="sourceLineNo">447</span><a name="line.447"></a>
-<span class="sourceLineNo">448</span>  private ThreadPoolExecutor getPool() {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>    return getRegionServices().getInMemoryCompactionPool();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>  }<a name="line.450"></a>
-<span class="sourceLineNo">451</span><a name="line.451"></a>
-<span class="sourceLineNo">452</span>  @VisibleForTesting<a name="line.452"></a>
-<span class="sourceLineNo">453</span>  protected boolean shouldFlushInMemory() {<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    if (this.active.keySize() &gt; inmemoryFlushSize) { // size above flush threshold<a name="line.454"></a>
-<span class="sourceLineNo">455</span>      if (inWalReplay) {  // when replaying edits from WAL there is no need in in-memory flush<a name="line.455"></a>
-<span class="sourceLineNo">456</span>        return false;     // regardless the size<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>      // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      // the insert of the active into the compaction pipeline<a name="line.459"></a>
-<span class="sourceLineNo">460</span>      return (inMemoryFlushInProgress.compareAndSet(false,true));<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    }<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    return false;<a name="line.462"></a>
-<span class="sourceLineNo">463</span>  }<a name="line.463"></a>
-<span class="sourceLineNo">464</span><a name="line.464"></a>
-<span class="sourceLineNo">465</span>  /**<a name="line.465"></a>
-<span class="sourceLineNo">466</span>   * The request to cancel the compaction asynchronous task (caused by in-memory flush)<a name="line.466"></a>
-<span class="sourceLineNo">467</span>   * The compaction may still happen if the request was sent too late<a name="line.467"></a>
-<span class="sourceLineNo">468</span>   * Non-blocking request<a name="line.468"></a>
-<span class="sourceLineNo">469</span>   */<a name="line.469"></a>
-<span class="sourceLineNo">470</span>  private void stopCompaction() {<a name="line.470"></a>
-<span class="sourceLineNo">471</span>    if (inMemoryFlushInProgress.get()) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      compactor.stop();<a name="line.472"></a>
-<span class="sourceLineNo">473</span>    }<a name="line.473"></a>
-<span class="sourceLineNo">474</span>  }<a name="line.474"></a>
-<span class="sourceLineNo">475</span><a name="line.475"></a>
-<span class="sourceLineNo">476</span>  protected void pushActiveToPipeline(MutableSegment active) {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>    if (!active.isEmpty()) {<a name="line.477"></a>
-<span class="sourceLineNo">478</span>      pipeline.pushHead(active);<a name="line.478"></a>
-<span class="sourceLineNo">479</span>      resetActive();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>    }<a name="line.480"></a>
-<span class="sourceLineNo">481</span>  }<a name="line.481"></a>
-<span class="sourceLineNo">482</span><a name="line.482"></a>
-<span class="sourceLineNo">483</span>  private void pushTailToSnapshot() {<a name="line.483"></a>
-<span class="sourceLineNo">484</span>    VersionedSegmentsList segments = pipeline.getVersionedTail();<a name="line.484"></a>
-<span class="sourceLineNo">485</span>    pushToSnapshot(segments.getStoreSegments());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.486"></a>
-<span class="sourceLineNo">487</span>    pipeline.swap(segments,null,false, false);<a name="line.487"></a>
-<span class="sourceLineNo">488</span>  }<a name="line.488"></a>
-<span class="sourceLineNo">489</span><a name="line.489"></a>
-<span class="sourceLineNo">490</span>  private void pushPipelineToSnapshot() {<a name="line.490"></a>
-<span class="sourceLineNo">491</span>    int iterationsCnt = 0;<a name="line.491"></a>
-<span class="sourceLineNo">492</span>    boolean done = false;<a name="line.492"></a>
-<span class="sourceLineNo">493</span>    while (!done) {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      iterationsCnt++;<a name="line.494"></a>
-<span class="sourceLineNo">495</span>      VersionedSegmentsList segments = pipeline.getVersionedList();<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      pushToSnapshot(segments.getStoreSegments());<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      // swap can return false in case the pipeline was updated by ongoing compaction<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      // and the version increase, the chance of it happenning is very low<a name="line.498"></a>
-<span class="sourceLineNo">499</span>      // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      done = pipeline.swap(segments, null, false, false);<a name="line.500"></a>
-<span class="sourceLineNo">501</span>      if (iterationsCnt&gt;2) {<a name="line.501"></a>
-<span class="sourceLineNo">502</span>        // practically it is impossible that this loop iterates more than two times<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        // (because the compaction is stopped and none restarts it while in snapshot request),<a name="line.503"></a>
-<span class="sourceLineNo">504</span>        // however stopping here for the case of the infinite loop causing by any error<a name="line.504"></a>
-<span class="sourceLineNo">505</span>        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +<a name="line.505"></a>
-<span class="sourceLineNo">506</span>            " while flushing to disk.");<a name="line.506"></a>
-<span class="sourceLineNo">507</span>        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());<a name="line.507"></a>
-<span class="sourceLineNo">508</span>        break;<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      }<a name="line.509"></a>
-<span class="sourceLineNo">510</span>    }<a name="line.510"></a>
-<span class="sourceLineNo">511</span>  }<a name="line.511"></a>
-<span class="sourceLineNo">512</span><a name="line.512"></a>
-<span class="sourceLineNo">513</span>  private void pushToSnapshot(List&lt;ImmutableSegment&gt; segments) {<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    if(segments.isEmpty()) return;<a name="line.514"></a>
-<span class="sourceLineNo">515</span>    if(segments.size() == 1 &amp;&amp; !segments.get(0).isEmpty()) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      this.snapshot = segments.get(0);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>      return;<a name="line.517"></a>
-<span class="sourceLineNo">518</span>    } else { // create composite snapshot<a name="line.518"></a>
-<span class="sourceLineNo">519</span>      this.snapshot =<a name="line.519"></a>
-<span class="sourceLineNo">520</span>          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    }<a name="line.521"></a>
+<span class="sourceLineNo">448</span>  @VisibleForTesting<a name="line.448"></a>
+<span class="sourceLineNo">449</span>  protected boolean shouldFlushInMemory() {<a name="line.449"></a>
+<span class="sourceLineNo">450</span>    if (this.active.keySize() &gt; inmemoryFlushSize) { // size above flush threshold<a name="line.450"></a>
+<span class="sourceLineNo">451</span>      if (inWalReplay) {  // when replaying edits from WAL there is no need in in-memory flush<a name="line.451"></a>
+<span class="sourceLineNo">452</span>        return false;     // regardless the size<a name="line.452"></a>
+<span class="sourceLineNo">453</span>      }<a name="line.453"></a>
+<span class="sourceLineNo">454</span>      // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude<a name="line.454"></a>
+<span class="sourceLineNo">455</span>      // the insert of the active into the compaction pipeline<a name="line.455"></a>
+<span class="sourceLineNo">456</span>      return (inMemoryFlushInProgress.compareAndSet(false,true));<a name="line.456"></a>
+<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>    return false;<a name="line.458"></a>
+<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>  /**<a name="line.461"></a>
+<span class="sourceLineNo">462</span>   * The request to cancel the compaction asynchronous task (caused by in-memory flush)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>   * The compaction may still happen if the request was sent too late<a name="line.463"></a>
+<span class="sourceLineNo">464</span>   * Non-blocking request<a name="line.464"></a>
+<span class="sourceLineNo">465</span>   */<a name="line.465"></a>
+<span class="sourceLineNo">466</span>  private void stopCompaction() {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>    if (inMemoryFlushInProgress.get()) {<a name="line.467"></a>
+<span class="sourceLineNo">468</span>      compactor.stop();<a name="line.468"></a>
+<span class="sourceLineNo">469</span>    }<a name="line.469"></a>
+<span class="sourceLineNo">470</span>  }<a name="line.470"></a>
+<span class="sourceLineNo">471</span><a name="line.471"></a>
+<span class="sourceLineNo">472</span>  protected void pushActiveToPipeline(MutableSegment active) {<a name="line.472"></a>
+<span class="sourceLineNo">473</span>    if (!active.isEmpty()) {<a name="line.473"></a>
+<span class="sourceLineNo">474</span>      pipeline.pushHead(active);<a name="line.474"></a>
+<span class="sourceLineNo">475</span>      resetActive();<a name="line.475"></a>
+<span class="sourceLineNo">476</span>    }<a name="line.476"></a>
+<span class="sourceLineNo">477</span>  }<a name="line.477"></a>
+<span class="sourceLineNo">478</span><a name="line.478"></a>
+<span class="sourceLineNo">479</span>  private void pushTailToSnapshot() {<a name="line.479"></a>
+<span class="sourceLineNo">480</span>    VersionedSegmentsList segments = pipeline.getVersionedTail();<a name="line.480"></a>
+<span class="sourceLineNo">481</span>    pushToSnapshot(segments.getStoreSegments());<a name="line.481"></a>
+<span class="sourceLineNo">482</span>    // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.482"></a>
+<span class="sourceLineNo">483</span>    pipeline.swap(segments,null,false, false);<a name="line.483"></a>
+<span class="sourceLineNo">484</span>  }<a name="line.484"></a>
+<span class="sourceLineNo">485</span><a name="line.485"></a>
+<span class="sourceLineNo">486</span>  private void pushPipelineToSnapshot() {<a name="line.486"></a>
+<span class="sourceLineNo">487</span>    int iterationsCnt = 0;<a name="line.487"></a>
+<span class="sourceLineNo">488</span>    boolean done = false;<a name="line.488"></a>
+<span class="sourceLineNo">489</span>    while (!done) {<a name="line.489"></a>
+<span class="sourceLineNo">490</span>      iterationsCnt++;<a name="line.490"></a>
+<span class="sourceLineNo">491</span>      VersionedSegmentsList segments = pipeline.getVersionedList();<a name="line.491"></a>
+<span class="sourceLineNo">492</span>      pushToSnapshot(segments.getStoreSegments());<a name="line.492"></a>
+<span class="sourceLineNo">493</span>      // swap can return false in case the pipeline was updated by ongoing compaction<a name="line.493"></a>
+<span class="sourceLineNo">494</span>      // and the version increase, the chance of it happenning is very low<a name="line.494"></a>
+<span class="sourceLineNo">495</span>      // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      done = pipeline.swap(segments, null, false, false);<a name="line.496"></a>
+<span class="sourceLineNo">497</span>      if (iterationsCnt&gt;2) {<a name="line.497"></a>
+<span class="sourceLineNo">498</span>        // practically it is impossible that this loop iterates more than two times<a name="line.498"></a>
+<span class="sourceLineNo">499</span>        // (because the compaction is stopped and none restarts it while in snapshot request),<a name="line.499"></a>
+<span class="sourceLineNo">500</span>        // however stopping here for the case of the infinite loop causing by any error<a name="line.500"></a>
+<span class="sourceLineNo">501</span>        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +<a name="line.501"></a>
+<span class="sourceLineNo">502</span>            " while flushing to disk.");<a name="line.502"></a>
+<span class="sourceLineNo">503</span>        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());<a name="line.503"></a>
+<span class="sourceLineNo">504</span>        break;<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      }<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    }<a name="line.506"></a>
+<span class="sourceLineNo">507</span>  }<a name="line.507"></a>
+<span class="sourceLineNo">508</span><a name="line.508"></a>
+<span class="sourceLineNo">509</span>  private void pushToSnapshot(List&lt;ImmutableSegment&gt; segments) {<a name="line.509"></a>
+<span class="sourceLineNo">510</span>    if(segments.isEmpty()) return;<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    if(segments.size() == 1 &amp;&amp; !segments.get(0).isEmpty()) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      this.snapshot = segments.get(0);<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      return;<a name="line.513"></a>
+<span class="sourceLineNo">514</span>    } else { // create composite snapshot<a name="line.514"></a>
+<span class="sourceLineNo">515</span>      this.snapshot =<a name="line.515"></a>
+<span class="sourceLineNo">516</span>          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>  }<a name="line.518"></a>
+<span class="sourceLineNo">519</span><a name="line.519"></a>
+<span class="sourceLineNo">520</span>  private RegionServicesForStores getRegionServices() {<a name="line.520"></a>
+<span class="sourceLineNo">521</span>    return regionServices;<a name="line.521"></a>
 <span class="sourceLineNo">522</span>  }<a name="line.522"></a>
 <span class="sourceLineNo">523</span><a name="line.523"></a>
-<span class="sourceLineNo">524</span>  private RegionServicesForStores getRegionServices() {<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    return regionServices;<a name="line.525"></a>
-<span class="sourceLineNo">526</span>  }<a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>  /**<a name="line.528"></a>
-<span class="sourceLineNo">529</span>  * The in-memory-flusher thread performs the flush asynchronously.<a name="line.529"></a>
-<span class="sourceLineNo">530</span>  * There is at most one thread per memstore instance.<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock<a name="line.531"></a>
-<span class="sourceLineNo">532</span>  * and compacts the pipeline.<a name="line.532"></a>
-<span class="sourceLineNo">533</span>  */<a name="line.533"></a>
-<span class="sourceLineNo">534</span>  private class InMemoryFlushRunnable implements Runnable {<a name="line.534"></a>
-<span class="sourceLineNo">535</span><a name="line.535"></a>
-<span class="sourceLineNo">536</span>    @Override<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    public void run() {<a name="line.537"></a>
-<span class="sourceLineNo">538</span>      try {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        flushInMemory();<a name="line.539"></a>
-<span class="sourceLineNo">540</span>      } catch (IOException e) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>        LOG.warn("Unable to run memstore compaction. region "<a name="line.541"></a>
-<span class="sourceLineNo">542</span>            + getRegionServices().getRegionInfo().getRegionNameAsString()<a name="line.542"></a>
-<span class="sourceLineNo">543</span>            + "store: "+ getFamilyName(), e);<a name="line.543"></a>
-<span class="sourceLineNo">544</span>      }<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
-<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
-<span class="sourceLineNo">547</span><a name="line.547"></a>
-<span class="sourceLineNo">548</span>  @VisibleForTesting<a name="line.548"></a>
-<span class="sourceLineNo">549</span>  boolean isMemStoreFlushingInMemory() {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>    return inMemoryFlushInProgress.get();<a name="line.550"></a>
-<span class="sourceLineNo">551</span>  }<a name="line.551"></a>
-<span class="sourceLineNo">552</span><a name="line.552"></a>
-<span class="sourceLineNo">553</span>  /**<a name="line.553"></a>
-<span class="sourceLineNo">554</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.554"></a>
-<span class="sourceLineNo">555</span>   *             first.<a name="line.555"></a>
-<span class="sourceLineNo">556</span>   * @return Next row or null if none found.<a name="line.556"></a>
-<span class="sourceLineNo">557</span>   */<a name="line.557"></a>
-<span class="sourceLineNo">558</span>  Cell getNextRow(final Cell cell) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Cell lowest = null;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    List&lt;Segment&gt; segments = getSegments();<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    for (Segment segment : segments) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      if (lowest == null) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        lowest = getNextRow(cell, segment.getCellSet());<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      } else {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));<a name="line.565"></a>
-<span class="sourceLineNo">566</span>      }<a name="line.566"></a>
-<span class="sourceLineNo">567</span>    }<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    return lowest;<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  }<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>  @VisibleForTesting<a name="line.571"></a>
-<span class="sourceLineNo">572</span>  long getInmemoryFlushSize() {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    return inmemoryFlushSize;<a name="line.573"></a>
-<span class="sourceLineNo">574</span>  }<a name="line.574"></a>
-<span class="sourceLineNo">575</span><a name="line.575"></a>
-<span class="sourceLineNo">576</span>  // debug method<a name="line.576"></a>
-<span class="sourceLineNo">577</span>  public void debug() {<a name="line.577"></a>
-<span class="sourceLineNo">578</span>    String msg = "active size=" + this.active.keySize();<a name="line.578"></a>
-<span class="sourceLineNo">579</span>    msg += " in-memory flush size is "+ inmemoryFlushSize;<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");<a name="line.580"></a>
-<span class="sourceLineNo">581</span>    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");<a name="line.581"></a>
-<span class="sourceLineNo">582</span>    LOG.debug(msg);<a name="line.582"></a>
-<span class="sourceLineNo">583</span>  }<a name="line.583"></a>
-<span class="sourceLineNo">584</span><a name="line.584"></a>
-<span class="sourceLineNo">585</span>}<a name="line.585"></a>
+<span class="sourceLineNo">524</span>  /**<a name="line.524"></a>
+<span class="sourceLineNo">525</span>  * The in-memory-flusher thread performs the flush asynchronously.<a name="line.525"></a>
+<span class="sourceLineNo">526</span>  * There is at most one thread per memstore instance.<a name="line.526"></a>
+<span class="sourceLineNo">527</span>  * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock<a name="line.527"></a>
+<span class="sourceLineNo">528</span>  * and compacts the pipeline.<a name="line.528"></a>
+<span class="sourceLineNo">529</span>  */<a name="line.529"></a>
+<span class="sourceLineNo">530</span>  private class InMemoryFlushRunnable implements Runnable {<a name="line.530"></a>
+<span class="sourceLineNo">531</span><a name="line.531"></a>
+<span class="sourceLineNo">532</span>    @Override<a name="line.532"></a>
+<span class="sourceLineNo">533</span>    public void run() {<a name="line.533"></a>
+<span class="sourceLineNo">534</span>      try {<a name="line.534"></a>
+<span class="sourceLineNo">535</span>        flushInMemory();<a name="line.535"></a>
+<span class="sourceLineNo">536</span>      } catch (IOException e) {<a name="line.536"></a>
+<span class="sourceLineNo">537</span>        LOG.warn("Unable to run memstore compaction. region "<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            + getRegionServices().getRegionInfo().getRegionNameAsString()<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            + "store: "+ getFamilyName(), e);<a name="line.539"></a>
+<span class="sourceLineNo">540</span>      }<a name="line.540"></a>
+<span class="sourceLineNo">541</span>    }<a name="line.541"></a>
+<span class="sourceLineNo">542</span>  }<a name="line.542"></a>
+<span class="sourceLineNo">543</span><a name="line.543"></a>
+<span class="sourceLineNo">544</span>  @VisibleForTesting<a name="line.544"></a>
+<span class="sourceLineNo">545</span>  boolean isMemStoreFlushingInMemory() {<a name="line.545"></a>
+<span class="sourceLineNo">546</span>    return inMemoryFlushInProgress.get();<a name="line.546"></a>
+<span class="sourceLineNo">547</span>  }<a name="line.547"></a>
+<span class="sourceLineNo">548</span><a name="line.548"></a>
+<span class="sourceLineNo">549</span>  /**<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   *             first.<a name="line.551"></a>
+<span class="sourceLineNo">552</span>   * @return Next row or null if none found.<a name="line.552"></a>
+<span class="sourceLineNo">553</span>   */<a name="line.553"></a>
+<span class="sourceLineNo">554</span>  Cell getNextRow(final Cell cell) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>    Cell lowest = null;<a name="line.555"></a>
+<span class="sourceLineNo">556</span>    List&lt;Segment&gt; segments = getSegments();<a name="line.556"></a>
+<span class="sourceLineNo">557</span>    for (Segment segment : segments) {<a name="line.557"></a>
+<span class="sourceLineNo">558</span>      if (lowest == null) {<a name="line.558"></a>
+<span class="sourceLineNo">559</span>        lowest = getNextRow(cell, segment.getCellSet());<a name="line.559"></a>
+<span class="sourceLineNo">560</span>      } else {<a name="line.560"></a>
+<span class="sourceLineNo">561</span>        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));<a name="line.561"></a>
+<span class="sourceLineNo">562</span>      }<a name="line.562"></a>
+<span class="sourceLineNo">563</span>    }<a name="line.563"></a>
+<span class="sourceLineNo">564</span>    return lowest;<a name="line.564"></a>
+<span class="sourceLineNo">565</span>  }<a name="line.565"></a>
+<span class="sourceLineNo">566</span><a name="line.566"></a>
+<span class="sourceLineNo">567</span>  @VisibleForTesting<a name="line.567"></a>
+<span class="sourceLineNo">568</span>  long getInmemoryFlushSize() {<a name="line.568"></a>
+<span class="sourceLineNo">569</span>    return inmemoryFlushSize;<a name="line.569"></a>
+<span class="sourceLineNo">570</span>  }<a name="line.570"></a>
+<span class="sourceLineNo">571</span><a name="line.571"></a>
+<span class="sourceLineNo">572</span>  // debug method<a name="line.572"></a>
+<span class="sourceLineNo">573</span>  public void debug() {<a name="line.573"></a>
+<span class="sourceLineNo">574</span>    String msg = "active size=" + this.active.keySize();<a name="line.574"></a>
+<span class="sourceLineNo">575</span>    msg += " in-memory flush size is "+ inmemoryFlushSize;<a name="line.575"></a>
+<span class="sourceLineNo">576</span>    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");<a name="line.576"></a>
+<span class="sourceLineNo">577</span>    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");<a name="line.577"></a>
+<span class="sourceLineNo">578</span>    LOG.debug(msg);<a name="line.578"></a>
+<span class="sourceLineNo">579</span>  }<a name="line.579"></a>
+<span class="sourceLineNo">580</span><a name="line.580"></a>
+<span class="sourceLineNo">581</span>}<a name="line.581"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
index 22e7059..01b8a09 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
@@ -362,235 +362,231 @@
 <span class="sourceLineNo">354</span>  }<a name="line.354"></a>
 <span class="sourceLineNo">355</span><a name="line.355"></a>
 <span class="sourceLineNo">356</span>  @Override<a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /*<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Scanners are ordered from 0 (oldest) to newest in increasing order.<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   */<a name="line.359"></a>
-<span class="sourceLineNo">360</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    MutableSegment activeTmp = active;<a name="line.361"></a>
-<span class="sourceLineNo">362</span>    List&lt;? extends Segment&gt; pipelineList = pipeline.getSegments();<a name="line.362"></a>
-<span class="sourceLineNo">363</span>    List&lt;? extends Segment&gt; snapshotList = snapshot.getAllSegments();<a name="line.363"></a>
-<span class="sourceLineNo">364</span>    long order = 1L + pipelineList.size() + snapshotList.size();<a name="line.364"></a>
-<span class="sourceLineNo">365</span>    // The list of elements in pipeline + the active element + the snapshot segment<a name="line.365"></a>
-<span class="sourceLineNo">366</span>    // The order is the Segment ordinal<a name="line.366"></a>
-<span class="sourceLineNo">367</span>    List&lt;KeyValueScanner&gt; list = createList((int) order);<a name="line.367"></a>
-<span class="sourceLineNo">368</span>    order = addToScanners(activeTmp, readPt, order, list);<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    order = addToScanners(pipelineList, readPt, order, list);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    addToScanners(snapshotList, readPt, order, list);<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    return list;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>  }<a name="line.372"></a>
-<span class="sourceLineNo">373</span><a name="line.373"></a>
-<span class="sourceLineNo">374</span>   @VisibleForTesting<a name="line.374"></a>
-<span class="sourceLineNo">375</span>   protected List&lt;KeyValueScanner&gt; createList(int capacity) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>     return new ArrayList&lt;&gt;(capacity);<a name="line.376"></a>
-<span class="sourceLineNo">377</span>   }<a name="line.377"></a>
-<span class="sourceLineNo">378</span><a name="line.378"></a>
-<span class="sourceLineNo">379</span>  /**<a name="line.379"></a>
-<span class="sourceLineNo">380</span>   * Check whether anything need to be done based on the current active set size.<a name="line.380"></a>
-<span class="sourceLineNo">381</span>   * The method is invoked upon every addition to the active set.<a name="line.381"></a>
-<span class="sourceLineNo">382</span>   * For CompactingMemStore, flush the active set to the read-only memory if it's<a name="line.382"></a>
-<span class="sourceLineNo">383</span>   * size is above threshold<a name="line.383"></a>
-<span class="sourceLineNo">384</span>   */<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  @Override<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  protected void checkActiveSize() {<a name="line.386"></a>
-<span class="sourceLineNo">387</span>    if (shouldFlushInMemory()) {<a name="line.387"></a>
-<span class="sourceLineNo">388</span>      /* The thread is dispatched to flush-in-memory. This cannot be done<a name="line.388"></a>
-<span class="sourceLineNo">389</span>      * on the same thread, because for flush-in-memory we require updatesLock<a name="line.389"></a>
-<span class="sourceLineNo">390</span>      * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      * in the shared mode. */<a name="line.391"></a>
-<span class="sourceLineNo">392</span>      InMemoryFlushRunnable runnable = new InMemoryFlushRunnable();<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      if (LOG.isTraceEnabled()) {<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        LOG.trace(<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          "Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());<a name="line.395"></a>
-<span class="sourceLineNo">396</span>      }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      getPool().execute(runnable);<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span>  }<a name="line.399"></a>
-<span class="sourceLineNo">400</span><a name="line.400"></a>
-<span class="sourceLineNo">401</span>  // internally used method, externally visible only for tests<a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  // otherwise there is a deadlock<a name="line.403"></a>
-<span class="sourceLineNo">404</span>  @VisibleForTesting<a name="line.404"></a>
-<span class="sourceLineNo">405</span>  void flushInMemory() throws IOException {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>    // setting the inMemoryFlushInProgress flag again for the case this method is invoked<a name="line.406"></a>
-<span class="sourceLineNo">407</span>    // directly (only in tests) in the common path setting from true to true is idempotent<a name="line.407"></a>
-<span class="sourceLineNo">408</span>    inMemoryFlushInProgress.set(true);<a name="line.408"></a>
-<span class="sourceLineNo">409</span>    try {<a name="line.409"></a>
-<span class="sourceLineNo">410</span>      // Phase I: Update the pipeline<a name="line.410"></a>
-<span class="sourceLineNo">411</span>      getRegionServices().blockUpdates();<a name="line.411"></a>
-<span class="sourceLineNo">412</span>      try {<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");<a name="line.413"></a>
-<span class="sourceLineNo">414</span>        pushActiveToPipeline(this.active);<a name="line.414"></a>
-<span class="sourceLineNo">415</span>      } finally {<a name="line.415"></a>
-<span class="sourceLineNo">416</span>        getRegionServices().unblockUpdates();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>      }<a name="line.417"></a>
-<span class="sourceLineNo">418</span><a name="line.418"></a>
-<span class="sourceLineNo">419</span>      // Used by tests<a name="line.419"></a>
-<span class="sourceLineNo">420</span>      if (!allowCompaction.get()) {<a name="line.420"></a>
-<span class="sourceLineNo">421</span>        return;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      // Phase II: Compact the pipeline<a name="line.423"></a>
-<span class="sourceLineNo">424</span>      try {<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        // Speculative compaction execution, may be interrupted if flush is forced while<a name="line.425"></a>
-<span class="sourceLineNo">426</span>        // compaction is in progress<a name="line.426"></a>
-<span class="sourceLineNo">427</span>        compactor.start();<a name="line.427"></a>
-<span class="sourceLineNo">428</span>      } catch (IOException e) {<a name="line.428"></a>
-<span class="sourceLineNo">429</span>        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",<a name="line.429"></a>
-<span class="sourceLineNo">430</span>            getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>      }<a name="line.431"></a>
-<span class="sourceLineNo">432</span>    } finally {<a name="line.432"></a>
-<span class="sourceLineNo">433</span>      inMemoryFlushInProgress.set(false);<a name="line.433"></a>
-<span class="sourceLineNo">434</span>      LOG.trace("IN-MEMORY FLUSH: end");<a name="line.434"></a>
-<span class="sourceLineNo">435</span>    }<a name="line.435"></a>
-<span class="sourceLineNo">436</span>  }<a name="line.436"></a>
-<span class="sourceLineNo">437</span><a name="line.437"></a>
-<span class="sourceLineNo">438</span>  private Segment getLastSegment() {<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    Segment localActive = getActive();<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    Segment tail = pipeline.getTail();<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    return tail == null ? localActive : tail;<a name="line.441"></a>
+<span class="sourceLineNo">357</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    MutableSegment activeTmp = active;<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    List&lt;? extends Segment&gt; pipelineList = pipeline.getSegments();<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    List&lt;? extends Segment&gt; snapshotList = snapshot.getAllSegments();<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    long numberOfSegments = 1L + pipelineList.size() + snapshotList.size();<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    // The list of elements in pipeline + the active element + the snapshot segment<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    List&lt;KeyValueScanner&gt; list = createList((int) numberOfSegments);<a name="line.363"></a>
+<span class="sourceLineNo">364</span>    addToScanners(activeTmp, readPt, list);<a name="line.364"></a>
+<span class="sourceLineNo">365</span>    addToScanners(pipelineList, readPt, list);<a name="line.365"></a>
+<span class="sourceLineNo">366</span>    addToScanners(snapshotList, readPt, list);<a name="line.366"></a>
+<span class="sourceLineNo">367</span>    return list;<a name="line.367"></a>
+<span class="sourceLineNo">368</span>  }<a name="line.368"></a>
+<span class="sourceLineNo">369</span><a name="line.369"></a>
+<span class="sourceLineNo">370</span>   @VisibleForTesting<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   protected List&lt;KeyValueScanner&gt; createList(int capacity) {<a name="line.371"></a>
+<span class="sourceLineNo">372</span>     return new ArrayList&lt;&gt;(capacity);<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   }<a name="line.373"></a>
+<span class="sourceLineNo">374</span><a name="line.374"></a>
+<span class="sourceLineNo">375</span>  /**<a name="line.375"></a>
+<span class="sourceLineNo">376</span>   * Check whether anything need to be done based on the current active set size.<a name="line.376"></a>
+<span class="sourceLineNo">377</span>   * The method is invoked upon every addition to the active set.<a name="line.377"></a>
+<span class="sourceLineNo">378</span>   * For CompactingMemStore, flush the active set to the read-only memory if it's<a name="line.378"></a>
+<span class="sourceLineNo">379</span>   * size is above threshold<a name="line.379"></a>
+<span class="sourceLineNo">380</span>   */<a name="line.380"></a>
+<span class="sourceLineNo">381</span>  @Override<a name="line.381"></a>
+<span class="sourceLineNo">382</span>  protected void checkActiveSize() {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    if (shouldFlushInMemory()) {<a name="line.383"></a>
+<span class="sourceLineNo">384</span>      /* The thread is dispatched to flush-in-memory. This cannot be done<a name="line.384"></a>
+<span class="sourceLineNo">385</span>      * on the same thread, because for flush-in-memory we require updatesLock<a name="line.385"></a>
+<span class="sourceLineNo">386</span>      * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      * in the shared mode. */<a name="line.387"></a>
+<span class="sourceLineNo">388</span>      InMemoryFlushRunnable runnable = new InMemoryFlushRunnable();<a name="line.388"></a>
+<span class="sourceLineNo">389</span>      if (LOG.isTraceEnabled()) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>        LOG.trace(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>          "Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName());<a name="line.391"></a>
+<span class="sourceLineNo">392</span>      }<a name="line.392"></a>
+<span class="sourceLineNo">393</span>      getPool().execute(runnable);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
+<span class="sourceLineNo">395</span>  }<a name="line.395"></a>
+<span class="sourceLineNo">396</span><a name="line.396"></a>
+<span class="sourceLineNo">397</span>  // internally used method, externally visible only for tests<a name="line.397"></a>
+<span class="sourceLineNo">398</span>  // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,<a name="line.398"></a>
+<span class="sourceLineNo">399</span>  // otherwise there is a deadlock<a name="line.399"></a>
+<span class="sourceLineNo">400</span>  @VisibleForTesting<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  void flushInMemory() throws IOException {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    // setting the inMemoryFlushInProgress flag again for the case this method is invoked<a name="line.402"></a>
+<span class="sourceLineNo">403</span>    // directly (only in tests) in the common path setting from true to true is idempotent<a name="line.403"></a>
+<span class="sourceLineNo">404</span>    inMemoryFlushInProgress.set(true);<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    try {<a name="line.405"></a>
+<span class="sourceLineNo">406</span>      // Phase I: Update the pipeline<a name="line.406"></a>
+<span class="sourceLineNo">407</span>      getRegionServices().blockUpdates();<a name="line.407"></a>
+<span class="sourceLineNo">408</span>      try {<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");<a name="line.409"></a>
+<span class="sourceLineNo">410</span>        pushActiveToPipeline(this.active);<a name="line.410"></a>
+<span class="sourceLineNo">411</span>      } finally {<a name="line.411"></a>
+<span class="sourceLineNo">412</span>        getRegionServices().unblockUpdates();<a name="line.412"></a>
+<span class="sourceLineNo">413</span>      }<a name="line.413"></a>
+<span class="sourceLineNo">414</span><a name="line.414"></a>
+<span class="sourceLineNo">415</span>      // Used by tests<a name="line.415"></a>
+<span class="sourceLineNo">416</span>      if (!allowCompaction.get()) {<a name="line.416"></a>
+<span class="sourceLineNo">417</span>        return;<a name="line.417"></a>
+<span class="sourceLineNo">418</span>      }<a name="line.418"></a>
+<span class="sourceLineNo">419</span>      // Phase II: Compact the pipeline<a name="line.419"></a>
+<span class="sourceLineNo">420</span>      try {<a name="line.420"></a>
+<span class="sourceLineNo">421</span>        // Speculative compaction execution, may be interrupted if flush is forced while<a name="line.421"></a>
+<span class="sourceLineNo">422</span>        // compaction is in progress<a name="line.422"></a>
+<span class="sourceLineNo">423</span>        compactor.start();<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      } catch (IOException e) {<a name="line.424"></a>
+<span class="sourceLineNo">425</span>        LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}",<a name="line.425"></a>
+<span class="sourceLineNo">426</span>            getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e);<a name="line.426"></a>
+<span class="sourceLineNo">427</span>      }<a name="line.427"></a>
+<span class="sourceLineNo">428</span>    } finally {<a name="line.428"></a>
+<span class="sourceLineNo">429</span>      inMemoryFlushInProgress.set(false);<a name="line.429"></a>
+<span class="sourceLineNo">430</span>      LOG.trace("IN-MEMORY FLUSH: end");<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    }<a name="line.431"></a>
+<span class="sourceLineNo">432</span>  }<a name="line.432"></a>
+<span class="sourceLineNo">433</span><a name="line.433"></a>
+<span class="sourceLineNo">434</span>  private Segment getLastSegment() {<a name="line.434"></a>
+<span class="sourceLineNo">435</span>    Segment localActive = getActive();<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    Segment tail = pipeline.getTail();<a name="line.436"></a>
+<span class="sourceLineNo">437</span>    return tail == null ? localActive : tail;<a name="line.437"></a>
+<span class="sourceLineNo">438</span>  }<a name="line.438"></a>
+<span class="sourceLineNo">439</span><a name="line.439"></a>
+<span class="sourceLineNo">440</span>  private byte[] getFamilyNameInBytes() {<a name="line.440"></a>
+<span class="sourceLineNo">441</span>    return store.getColumnFamilyDescriptor().getName();<a name="line.441"></a>
 <span class="sourceLineNo">442</span>  }<a name="line.442"></a>
 <span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>  private byte[] getFamilyNameInBytes() {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>    return store.getColumnFamilyDescriptor().getName();<a name="line.445"></a>
+<span class="sourceLineNo">444</span>  private ThreadPoolExecutor getPool() {<a name="line.444"></a>
+<span class="sourceLineNo">445</span>    return getRegionServices().getInMemoryCompactionPool();<a name="line.445"></a>
 <span class="sourceLineNo">446</span>  }<a name="line.446"></a>
 <span class="sourceLineNo">447</span><a name="line.447"></a>
-<span class="sourceLineNo">448</span>  private ThreadPoolExecutor getPool() {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>    return getRegionServices().getInMemoryCompactionPool();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>  }<a name="line.450"></a>
-<span class="sourceLineNo">451</span><a name="line.451"></a>
-<span class="sourceLineNo">452</span>  @VisibleForTesting<a name="line.452"></a>
-<span class="sourceLineNo">453</span>  protected boolean shouldFlushInMemory() {<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    if (this.active.keySize() &gt; inmemoryFlushSize) { // size above flush threshold<a name="line.454"></a>
-<span class="sourceLineNo">455</span>      if (inWalReplay) {  // when replaying edits from WAL there is no need in in-memory flush<a name="line.455"></a>
-<span class="sourceLineNo">456</span>        return false;     // regardless the size<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>      // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      // the insert of the active into the compaction pipeline<a name="line.459"></a>
-<span class="sourceLineNo">460</span>      return (inMemoryFlushInProgress.compareAndSet(false,true));<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    }<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    return false;<a name="line.462"></a>
-<span class="sourceLineNo">463</span>  }<a name="line.463"></a>
-<span class="sourceLineNo">464</span><a name="line.464"></a>
-<span class="sourceLineNo">465</span>  /**<a name="line.465"></a>
-<span class="sourceLineNo">466</span>   * The request to cancel the compaction asynchronous task (caused by in-memory flush)<a name="line.466"></a>
-<span class="sourceLineNo">467</span>   * The compaction may still happen if the request was sent too late<a name="line.467"></a>
-<span class="sourceLineNo">468</span>   * Non-blocking request<a name="line.468"></a>
-<span class="sourceLineNo">469</span>   */<a name="line.469"></a>
-<span class="sourceLineNo">470</span>  private void stopCompaction() {<a name="line.470"></a>
-<span class="sourceLineNo">471</span>    if (inMemoryFlushInProgress.get()) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      compactor.stop();<a name="line.472"></a>
-<span class="sourceLineNo">473</span>    }<a name="line.473"></a>
-<span class="sourceLineNo">474</span>  }<a name="line.474"></a>
-<span class="sourceLineNo">475</span><a name="line.475"></a>
-<span class="sourceLineNo">476</span>  protected void pushActiveToPipeline(MutableSegment active) {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>    if (!active.isEmpty()) {<a name="line.477"></a>
-<span class="sourceLineNo">478</span>      pipeline.pushHead(active);<a name="line.478"></a>
-<span class="sourceLineNo">479</span>      resetActive();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>    }<a name="line.480"></a>
-<span class="sourceLineNo">481</span>  }<a name="line.481"></a>
-<span class="sourceLineNo">482</span><a name="line.482"></a>
-<span class="sourceLineNo">483</span>  private void pushTailToSnapshot() {<a name="line.483"></a>
-<span class="sourceLineNo">484</span>    VersionedSegmentsList segments = pipeline.getVersionedTail();<a name="line.484"></a>
-<span class="sourceLineNo">485</span>    pushToSnapshot(segments.getStoreSegments());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.486"></a>
-<span class="sourceLineNo">487</span>    pipeline.swap(segments,null,false, false);<a name="line.487"></a>
-<span class="sourceLineNo">488</span>  }<a name="line.488"></a>
-<span class="sourceLineNo">489</span><a name="line.489"></a>
-<span class="sourceLineNo">490</span>  private void pushPipelineToSnapshot() {<a name="line.490"></a>
-<span class="sourceLineNo">491</span>    int iterationsCnt = 0;<a name="line.491"></a>
-<span class="sourceLineNo">492</span>    boolean done = false;<a name="line.492"></a>
-<span class="sourceLineNo">493</span>    while (!done) {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      iterationsCnt++;<a name="line.494"></a>
-<span class="sourceLineNo">495</span>      VersionedSegmentsList segments = pipeline.getVersionedList();<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      pushToSnapshot(segments.getStoreSegments());<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      // swap can return false in case the pipeline was updated by ongoing compaction<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      // and the version increase, the chance of it happenning is very low<a name="line.498"></a>
-<span class="sourceLineNo">499</span>      // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      done = pipeline.swap(segments, null, false, false);<a name="line.500"></a>
-<span class="sourceLineNo">501</span>      if (iterationsCnt&gt;2) {<a name="line.501"></a>
-<span class="sourceLineNo">502</span>        // practically it is impossible that this loop iterates more than two times<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        // (because the compaction is stopped and none restarts it while in snapshot request),<a name="line.503"></a>
-<span class="sourceLineNo">504</span>        // however stopping here for the case of the infinite loop causing by any error<a name="line.504"></a>
-<span class="sourceLineNo">505</span>        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +<a name="line.505"></a>
-<span class="sourceLineNo">506</span>            " while flushing to disk.");<a name="line.506"></a>
-<span class="sourceLineNo">507</span>        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());<a name="line.507"></a>
-<span class="sourceLineNo">508</span>        break;<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      }<a name="line.509"></a>
-<span class="sourceLineNo">510</span>    }<a name="line.510"></a>
-<span class="sourceLineNo">511</span>  }<a name="line.511"></a>
-<span class="sourceLineNo">512</span><a name="line.512"></a>
-<span class="sourceLineNo">513</span>  private void pushToSnapshot(List&lt;ImmutableSegment&gt; segments) {<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    if(segments.isEmpty()) return;<a name="line.514"></a>
-<span class="sourceLineNo">515</span>    if(segments.size() == 1 &amp;&amp; !segments.get(0).isEmpty()) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      this.snapshot = segments.get(0);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>      return;<a name="line.517"></a>
-<span class="sourceLineNo">518</span>    } else { // create composite snapshot<a name="line.518"></a>
-<span class="sourceLineNo">519</span>      this.snapshot =<a name="line.519"></a>
-<span class="sourceLineNo">520</span>          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    }<a name="line.521"></a>
+<span class="sourceLineNo">448</span>  @VisibleForTesting<a name="line.448"></a>
+<span class="sourceLineNo">449</span>  protected boolean shouldFlushInMemory() {<a name="line.449"></a>
+<span class="sourceLineNo">450</span>    if (this.active.keySize() &gt; inmemoryFlushSize) { // size above flush threshold<a name="line.450"></a>
+<span class="sourceLineNo">451</span>      if (inWalReplay) {  // when replaying edits from WAL there is no need in in-memory flush<a name="line.451"></a>
+<span class="sourceLineNo">452</span>        return false;     // regardless the size<a name="line.452"></a>
+<span class="sourceLineNo">453</span>      }<a name="line.453"></a>
+<span class="sourceLineNo">454</span>      // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude<a name="line.454"></a>
+<span class="sourceLineNo">455</span>      // the insert of the active into the compaction pipeline<a name="line.455"></a>
+<span class="sourceLineNo">456</span>      return (inMemoryFlushInProgress.compareAndSet(false,true));<a name="line.456"></a>
+<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>    return false;<a name="line.458"></a>
+<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>  /**<a name="line.461"></a>
+<span class="sourceLineNo">462</span>   * The request to cancel the compaction asynchronous task (caused by in-memory flush)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>   * The compaction may still happen if the request was sent too late<a name="line.463"></a>
+<span class="sourceLineNo">464</span>   * Non-blocking request<a name="line.464"></a>
+<span class="sourceLineNo">465</span>   */<a name="line.465"></a>
+<span class="sourceLineNo">466</span>  private void stopCompaction() {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>    if (inMemoryFlushInProgress.get()) {<a name="line.467"></a>
+<span class="sourceLineNo">468</span>      compactor.stop();<a name="line.468"></a>
+<span class="sourceLineNo">469</span>    }<a name="line.469"></a>
+<span class="sourceLineNo">470</span>  }<a name="line.470"></a>
+<span class="sourceLineNo">471</span><a name="line.471"></a>
+<span class="sourceLineNo">472</span>  protected void pushActiveToPipeline(MutableSegment active) {<a name="line.472"></a>
+<span class="sourceLineNo">473</span>    if (!active.isEmpty()) {<a name="line.473"></a>
+<span class="sourceLineNo">474</span>      pipeline.pushHead(active);<a name="line.474"></a>
+<span class="sourceLineNo">475</span>      resetActive();<a name="line.475"></a>
+<span class="sourceLineNo">476</span>    }<a name="line.476"></a>
+<span class="sourceLineNo">477</span>  }<a name="line.477"></a>
+<span class="sourceLineNo">478</span><a name="line.478"></a>
+<span class="sourceLineNo">479</span>  private void pushTailToSnapshot() {<a name="line.479"></a>
+<span class="sourceLineNo">480</span>    VersionedSegmentsList segments = pipeline.getVersionedTail();<a name="line.480"></a>
+<span class="sourceLineNo">481</span>    pushToSnapshot(segments.getStoreSegments());<a name="line.481"></a>
+<span class="sourceLineNo">482</span>    // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.482"></a>
+<span class="sourceLineNo">483</span>    pipeline.swap(segments,null,false, false);<a name="line.483"></a>
+<span class="sourceLineNo">484</span>  }<a name="line.484"></a>
+<span class="sourceLineNo">485</span><a name="line.485"></a>
+<span class="sourceLineNo">486</span>  private void pushPipelineToSnapshot() {<a name="line.486"></a>
+<span class="sourceLineNo">487</span>    int iterationsCnt = 0;<a name="line.487"></a>
+<span class="sourceLineNo">488</span>    boolean done = false;<a name="line.488"></a>
+<span class="sourceLineNo">489</span>    while (!done) {<a name="line.489"></a>
+<span class="sourceLineNo">490</span>      iterationsCnt++;<a name="line.490"></a>
+<span class="sourceLineNo">491</span>      VersionedSegmentsList segments = pipeline.getVersionedList();<a name="line.491"></a>
+<span class="sourceLineNo">492</span>      pushToSnapshot(segments.getStoreSegments());<a name="line.492"></a>
+<span class="sourceLineNo">493</span>      // swap can return false in case the pipeline was updated by ongoing compaction<a name="line.493"></a>
+<span class="sourceLineNo">494</span>      // and the version increase, the chance of it happenning is very low<a name="line.494"></a>
+<span class="sourceLineNo">495</span>      // In Swap: don't close segments (they are in snapshot now) and don't update the region size<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      done = pipeline.swap(segments, null, false, false);<a name="line.496"></a>
+<span class="sourceLineNo">497</span>      if (iterationsCnt&gt;2) {<a name="line.497"></a>
+<span class="sourceLineNo">498</span>        // practically it is impossible that this loop iterates more than two times<a name="line.498"></a>
+<span class="sourceLineNo">499</span>        // (because the compaction is stopped and none restarts it while in snapshot request),<a name="line.499"></a>
+<span class="sourceLineNo">500</span>        // however stopping here for the case of the infinite loop causing by any error<a name="line.500"></a>
+<span class="sourceLineNo">501</span>        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +<a name="line.501"></a>
+<span class="sourceLineNo">502</span>            " while flushing to disk.");<a name="line.502"></a>
+<span class="sourceLineNo">503</span>        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());<a name="line.503"></a>
+<span class="sourceLineNo">504</span>        break;<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      }<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    }<a name="line.506"></a>
+<span class="sourceLineNo">507</span>  }<a name="line.507"></a>
+<span class="sourceLineNo">508</span><a name="line.508"></a>
+<span class="sourceLineNo">509</span>  private void pushToSnapshot(List&lt;ImmutableSegment&gt; segments) {<a name="line.509"></a>
+<span class="sourceLineNo">510</span>    if(segments.isEmpty()) return;<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    if(segments.size() == 1 &amp;&amp; !segments.get(0).isEmpty()) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      this.snapshot = segments.get(0);<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      return;<a name="line.513"></a>
+<span class="sourceLineNo">514</span>    } else { // create composite snapshot<a name="line.514"></a>
+<span class="sourceLineNo">515</span>      this.snapshot =<a name="line.515"></a>
+<span class="sourceLineNo">516</span>          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>  }<a name="line.518"></a>
+<span class="sourceLineNo">519</span><a name="line.519"></a>
+<span class="sourceLineNo">520</span>  private RegionServicesForStores getRegionServices() {<a name="line.520"></a>
+<span class="sourceLineNo">521</span>    return regionServices;<a name="line.521"></a>
 <span class="sourceLineNo">522</span>  }<a name="line.522"></a>
 <span class="sourceLineNo">523</span><a name="line.523"></a>
-<span class="sourceLineNo">524</span>  private RegionServicesForStores getRegionServices() {<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    return regionServices;<a name="line.525"></a>
-<span class="sourceLineNo">526</span>  }<a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>  /**<a name="line.528"></a>
-<span class="sourceLineNo">529</span>  * The in-memory-flusher thread performs the flush asynchronously.<a name="line.529"></a>
-<span class="sourceLineNo">530</span>  * There is at most one thread per memstore instance.<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock<a name="line.531"></a>
-<span class="sourceLineNo">532</span>  * and compacts the pipeline.<a name="line.532"></a>
-<span class="sourceLineNo">533</span>  */<a name="line.533"></a>
-<span class="sourceLineNo">534</span>  private class InMemoryFlushRunnable implements Runnable {<a name="line.534"></a>
-<span class="sourceLineNo">535</span><a name="line.535"></a>
-<span class="sourceLineNo">536</span>    @Override<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    public void run() {<a name="line.537"></a>
-<span class="sourceLineNo">538</span>      try {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        flushInMemory();<a name="line.539"></a>
-<span class="sourceLineNo">540</span>      } catch (IOException e) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>        LOG.warn("Unable to run memstore compaction. region "<a name="line.541"></a>
-<span class="sourceLineNo">542</span>            + getRegionServices().getRegionInfo().getRegionNameAsString()<a name="line.542"></a>
-<span class="sourceLineNo">543</span>            + "store: "+ getFamilyName(), e);<a name="line.543"></a>
-<span class="sourceLineNo">544</span>      }<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
-<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
-<span class="sourceLineNo">547</span><a name="line.547"></a>
-<span class="sourceLineNo">548</span>  @VisibleForTesting<a name="line.548"></a>
-<span class="sourceLineNo">549</span>  boolean isMemStoreFlushingInMemory() {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>    return inMemoryFlushInProgress.get();<a name="line.550"></a>
-<span class="sourceLineNo">551</span>  }<a name="line.551"></a>
-<span class="sourceLineNo">552</span><a name="line.552"></a>
-<span class="sourceLineNo">553</span>  /**<a name="line.553"></a>
-<span class="sourceLineNo">554</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.554"></a>
-<span class="sourceLineNo">555</span>   *             first.<a name="line.555"></a>
-<span class="sourceLineNo">556</span>   * @return Next row or null if none found.<a name="line.556"></a>
-<span class="sourceLineNo">557</span>   */<a name="line.557"></a>
-<span class="sourceLineNo">558</span>  Cell getNextRow(final Cell cell) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Cell lowest = null;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    List&lt;Segment&gt; segments = getSegments();<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    for (Segment segment : segments) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      if (lowest == null) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        lowest = getNextRow(cell, segment.getCellSet());<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      } else {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));<a name="line.565"></a>
-<span class="sourceLineNo">566</span>      }<a name="line.566"></a>
-<span class="sourceLineNo">567</span>    }<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    return lowest;<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  }<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>  @VisibleForTesting<a name="line.571"></a>
-<span class="sourceLineNo">572</span>  long getInmemoryFlushSize() {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    return inmemoryFlushSize;<a name="line.573"></a>
-<span class="sourceLineNo">574</span>  }<a name="line.574"></a>
-<span class="sourceLineNo">575</span><a name="line.575"></a>
-<span class="sourceLineNo">576</span>  // debug method<a name="line.576"></a>
-<span class="sourceLineNo">577</span>  public void debug() {<a name="line.577"></a>
-<span class="sourceLineNo">578</span>    String msg = "active size=" + this.active.keySize();<a name="line.578"></a>
-<span class="sourceLineNo">579</span>    msg += " in-memory flush size is "+ inmemoryFlushSize;<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");<a name="line.580"></a>
-<span class="sourceLineNo">581</span>    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");<a name="line.581"></a>
-<span class="sourceLineNo">582</span>    LOG.debug(msg);<a name="line.582"></a>
-<span class="sourceLineNo">583</span>  }<a name="line.583"></a>
-<span class="sourceLineNo">584</span><a name="line.584"></a>
-<span class="sourceLineNo">585</span>}<a name="line.585"></a>
+<span class="sourceLineNo">524</span>  /**<a name="line.524"></a>
+<span class="sourceLineNo">525</span>  * The in-memory-flusher thread performs the flush asynchronously.<a name="line.525"></a>
+<span class="sourceLineNo">526</span>  * There is at most one thread per memstore instance.<a name="line.526"></a>
+<span class="sourceLineNo">527</span>  * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock<a name="line.527"></a>
+<span class="sourceLineNo">528</span>  * and compacts the pipeline.<a name="line.528"></a>
+<span class="sourceLineNo">529</span>  */<a name="line.529"></a>
+<span class="sourceLineNo">530</span>  private class InMemoryFlushRunnable implements Runnable {<a name="line.530"></a>
+<span class="sourceLineNo">531</span><a name="line.531"></a>
+<span class="sourceLineNo">532</span>    @Override<a name="line.532"></a>
+<span class="sourceLineNo">533</span>    public void run() {<a name="line.533"></a>
+<span class="sourceLineNo">534</span>      try {<a name="line.534"></a>
+<span class="sourceLineNo">535</span>        flushInMemory();<a name="line.535"></a>
+<span class="sourceLineNo">536</span>      } catch (IOException e) {<a name="line.536"></a>
+<span class="sourceLineNo">537</span>        LOG.warn("Unable to run memstore compaction. region "<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            + getRegionServices().getRegionInfo().getRegionNameAsString()<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            + "store: "+ getFamilyName(), e);<a name="line.539"></a>
+<span class="sourceLineNo">540</span>      }<a name="line.540"></a>
+<span class="sourceLineNo">541</span>    }<a name="line.541"></a>
+<span class="sourceLineNo">542</span>  }<a name="line.542"></a>
+<span class="sourceLineNo">543</span><a name="line.543"></a>
+<span class="sourceLineNo">544</span>  @VisibleForTesting<a name="line.544"></a>
+<span class="sourceLineNo">545</span>  boolean isMemStoreFlushingInMemory() {<a name="line.545"></a>
+<span class="sourceLineNo">546</span>    return inMemoryFlushInProgress.get();<a name="line.546"></a>
+<span class="sourceLineNo">547</span>  }<a name="line.547"></a>
+<span class="sourceLineNo">548</span><a name="line.548"></a>
+<span class="sourceLineNo">549</span>  /**<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   *             first.<a name="line.551"></a>
+<span class="sourceLineNo">552</span>   * @return Next row or null if none found.<a name="line.552"></a>
+<span class="sourceLineNo">553</span>   */<a name="line.553"></a>
+<span class="sourceLineNo">554</span>  Cell getNextRow(final Cell cell) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>    Cell lowest = null;<a name="line.555"></a>
+<span class="sourceLineNo">556</span>    List&lt;Segment&gt; segments = getSegments();<a name="line.556"></a>
+<span class="sourceLineNo">557</span>    for (Segment segment : segments) {<a name="line.557"></a>
+<span class="sourceLineNo">558</span>      if (lowest == null) {<a name="line.558"></a>
+<span class="sourceLineNo">559</span>        lowest = getNextRow(cell, segment.getCellSet());<a name="line.559"></a>
+<span class="sourceLineNo">560</span>      } else {<a name="line.560"></a>
+<span class="sourceLineNo">561</span>        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));<a name="line.561"></a>
+<span class="sourceLineNo">562</span>      }<a name="line.562"></a>
+<span class="sourceLineNo">563</span>    }<a name="line.563"></a>
+<span class="sourceLineNo">564</span>    return lowest;<a name="line.564"></a>
+<span class="sourceLineNo">565</span>  }<a name="line.565"></a>
+<span class="sourceLineNo">566</span><a name="line.566"></a>
+<span class="sourceLineNo">567</span>  @VisibleForTesting<a name="line.567"></a>
+<span class="sourceLineNo">568</span>  long getInmemoryFlushSize() {<a name="line.568"></a>
+<span class="sourceLineNo">569</span>    return inmemoryFlushSize;<a name="line.569"></a>
+<span class="sourceLineNo">570</span>  }<a name="line.570"></a>
+<span class="sourceLineNo">571</span><a name="line.571"></a>
+<span class="sourceLineNo">572</span>  // debug method<a name="line.572"></a>
+<span class="sourceLineNo">573</span>  public void debug() {<a name="line.573"></a>
+<span class="sourceLineNo">574</span>    String msg = "active size=" + this.active.keySize();<a name="line.574"></a>
+<span class="sourceLineNo">575</span>    msg += " in-memory flush size is "+ inmemoryFlushSize;<a name="line.575"></a>
+<span class="sourceLineNo">576</span>    msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false");<a name="line.576"></a>
+<span class="sourceLineNo">577</span>    msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false");<a name="line.577"></a>
+<span class="sourceLineNo">578</span>    LOG.debug(msg);<a name="line.578"></a>
+<span class="sourceLineNo">579</span>  }<a name="line.579"></a>
+<span class="sourceLineNo">580</span><a name="line.580"></a>
+<span class="sourceLineNo">581</span>}<a name="line.581"></a>
 
 
 


[12/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineN

<TRUNCATED>

[18/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LO

<TRUNCATED>

[41/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/apidocs/org/apache/hadoop/hbase/util/VersionInfo.html
----------------------------------------------------------------------
diff --git a/apidocs/org/apache/hadoop/hbase/util/VersionInfo.html b/apidocs/org/apache/hadoop/hbase/util/VersionInfo.html
index 833a12e..e29d7bd 100644
--- a/apidocs/org/apache/hadoop/hbase/util/VersionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/util/VersionInfo.html
@@ -371,7 +371,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>main</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.154">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</pre>
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/VersionInfo.html#line.169">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html b/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
index 7dbc29c..5cd476c 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -43,7 +43,7 @@
 <span class="sourceLineNo">035</span><a name="line.35"></a>
 <span class="sourceLineNo">036</span>  // If between two dots there is not a number, we regard it as a very large number so it is<a name="line.36"></a>
 <span class="sourceLineNo">037</span>  // higher than any numbers in the version.<a name="line.37"></a>
-<span class="sourceLineNo">038</span>  private static int VERY_LARGE_NUMBER = 100000;<a name="line.38"></a>
+<span class="sourceLineNo">038</span>  private static final int VERY_LARGE_NUMBER = 100000;<a name="line.38"></a>
 <span class="sourceLineNo">039</span><a name="line.39"></a>
 <span class="sourceLineNo">040</span>  /**<a name="line.40"></a>
 <span class="sourceLineNo">041</span>   * Get the hbase version.<a name="line.41"></a>
@@ -126,43 +126,58 @@
 <span class="sourceLineNo">118</span>      return 0;<a name="line.118"></a>
 <span class="sourceLineNo">119</span>    }<a name="line.119"></a>
 <span class="sourceLineNo">120</span><a name="line.120"></a>
-<span class="sourceLineNo">121</span>    String s1[] = v1.split("\\.|-");//1.2.3-hotfix -&gt; [1, 2, 3, hotfix]<a name="line.121"></a>
-<span class="sourceLineNo">122</span>    String s2[] = v2.split("\\.|-");<a name="line.122"></a>
+<span class="sourceLineNo">121</span>    Object[] v1Comps = getVersionComponents(v1); //1.2.3-hotfix -&gt; [1, 2, 3, hotfix]<a name="line.121"></a>
+<span class="sourceLineNo">122</span>    Object[] v2Comps = getVersionComponents(v2);<a name="line.122"></a>
 <span class="sourceLineNo">123</span>    int index = 0;<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    while (index &lt; s1.length &amp;&amp; index &lt; s2.length) {<a name="line.124"></a>
-<span class="sourceLineNo">125</span>      int va = VERY_LARGE_NUMBER, vb = VERY_LARGE_NUMBER;<a name="line.125"></a>
-<span class="sourceLineNo">126</span>      try {<a name="line.126"></a>
-<span class="sourceLineNo">127</span>        va = Integer.parseInt(s1[index]);<a name="line.127"></a>
-<span class="sourceLineNo">128</span>      } catch (Exception ingore) {<a name="line.128"></a>
-<span class="sourceLineNo">129</span>      }<a name="line.129"></a>
-<span class="sourceLineNo">130</span>      try {<a name="line.130"></a>
-<span class="sourceLineNo">131</span>        vb = Integer.parseInt(s2[index]);<a name="line.131"></a>
-<span class="sourceLineNo">132</span>      } catch (Exception ingore) {<a name="line.132"></a>
-<span class="sourceLineNo">133</span>      }<a name="line.133"></a>
-<span class="sourceLineNo">134</span>      if (va != vb) {<a name="line.134"></a>
-<span class="sourceLineNo">135</span>        return va - vb;<a name="line.135"></a>
-<span class="sourceLineNo">136</span>      }<a name="line.136"></a>
-<span class="sourceLineNo">137</span>      if (va == VERY_LARGE_NUMBER) {<a name="line.137"></a>
-<span class="sourceLineNo">138</span>        // compare as String<a name="line.138"></a>
-<span class="sourceLineNo">139</span>        int c = s1[index].compareTo(s2[index]);<a name="line.139"></a>
-<span class="sourceLineNo">140</span>        if (c != 0) {<a name="line.140"></a>
-<span class="sourceLineNo">141</span>          return c;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>        }<a name="line.142"></a>
-<span class="sourceLineNo">143</span>      }<a name="line.143"></a>
-<span class="sourceLineNo">144</span>      index++;<a name="line.144"></a>
-<span class="sourceLineNo">145</span>    }<a name="line.145"></a>
-<span class="sourceLineNo">146</span>    if (index &lt; s1.length) {<a name="line.146"></a>
-<span class="sourceLineNo">147</span>      // s1 is longer<a name="line.147"></a>
-<span class="sourceLineNo">148</span>      return 1;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>    }<a name="line.149"></a>
-<span class="sourceLineNo">150</span>    //s2 is longer<a name="line.150"></a>
-<span class="sourceLineNo">151</span>    return -1;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>  }<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>  public static void main(String[] args) {<a name="line.154"></a>
-<span class="sourceLineNo">155</span>    writeTo(System.out);<a name="line.155"></a>
-<span class="sourceLineNo">156</span>  }<a name="line.156"></a>
-<span class="sourceLineNo">157</span>}<a name="line.157"></a>
+<span class="sourceLineNo">124</span>    while (index &lt; v1Comps.length &amp;&amp; index &lt; v2Comps.length) {<a name="line.124"></a>
+<span class="sourceLineNo">125</span>      int va = v1Comps[index] instanceof Integer ? (Integer)v1Comps[index] : VERY_LARGE_NUMBER;<a name="line.125"></a>
+<span class="sourceLineNo">126</span>      int vb = v2Comps[index] instanceof Integer ? (Integer)v2Comps[index] : VERY_LARGE_NUMBER;<a name="line.126"></a>
+<span class="sourceLineNo">127</span><a name="line.127"></a>
+<span class="sourceLineNo">128</span>      if (va != vb) {<a name="line.128"></a>
+<span class="sourceLineNo">129</span>        return va - vb;<a name="line.129"></a>
+<span class="sourceLineNo">130</span>      }<a name="line.130"></a>
+<span class="sourceLineNo">131</span>      if (va == VERY_LARGE_NUMBER) {<a name="line.131"></a>
+<span class="sourceLineNo">132</span>        // here, va and vb components must be same and Strings, compare as String<a name="line.132"></a>
+<span class="sourceLineNo">133</span>        int c = ((String)v1Comps[index]).compareTo((String)v2Comps[index]);<a name="line.133"></a>
+<span class="sourceLineNo">134</span>        if (c != 0) {<a name="line.134"></a>
+<span class="sourceLineNo">135</span>          return c;<a name="line.135"></a>
+<span class="sourceLineNo">136</span>        }<a name="line.136"></a>
+<span class="sourceLineNo">137</span>      }<a name="line.137"></a>
+<span class="sourceLineNo">138</span>      index++;<a name="line.138"></a>
+<span class="sourceLineNo">139</span>    }<a name="line.139"></a>
+<span class="sourceLineNo">140</span>    if (index &lt; v1Comps.length) {<a name="line.140"></a>
+<span class="sourceLineNo">141</span>      // v1 is longer<a name="line.141"></a>
+<span class="sourceLineNo">142</span>      return 1;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    }<a name="line.143"></a>
+<span class="sourceLineNo">144</span>    //v2 is longer<a name="line.144"></a>
+<span class="sourceLineNo">145</span>    return -1;<a name="line.145"></a>
+<span class="sourceLineNo">146</span>  }<a name="line.146"></a>
+<span class="sourceLineNo">147</span><a name="line.147"></a>
+<span class="sourceLineNo">148</span>  /**<a name="line.148"></a>
+<span class="sourceLineNo">149</span>   * Returns the version components as Integer and String objects<a name="line.149"></a>
+<span class="sourceLineNo">150</span>   * Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"]<a name="line.150"></a>
+<span class="sourceLineNo">151</span>   * @return the components of the version string<a name="line.151"></a>
+<span class="sourceLineNo">152</span>   */<a name="line.152"></a>
+<span class="sourceLineNo">153</span>  static Object[] getVersionComponents(final String version) {<a name="line.153"></a>
+<span class="sourceLineNo">154</span>    assert(version != null);<a name="line.154"></a>
+<span class="sourceLineNo">155</span>    Object[] strComps = version.split("[\\.-]");<a name="line.155"></a>
+<span class="sourceLineNo">156</span>    assert(strComps.length &gt; 0);<a name="line.156"></a>
+<span class="sourceLineNo">157</span><a name="line.157"></a>
+<span class="sourceLineNo">158</span>    Object[] comps = new Object[strComps.length];<a name="line.158"></a>
+<span class="sourceLineNo">159</span>    for (int i = 0; i &lt; strComps.length; ++i) {<a name="line.159"></a>
+<span class="sourceLineNo">160</span>      try {<a name="line.160"></a>
+<span class="sourceLineNo">161</span>        comps[i] = Integer.parseInt((String) strComps[i]);<a name="line.161"></a>
+<span class="sourceLineNo">162</span>      } catch (NumberFormatException e) {<a name="line.162"></a>
+<span class="sourceLineNo">163</span>        comps[i] = strComps[i];<a name="line.163"></a>
+<span class="sourceLineNo">164</span>      }<a name="line.164"></a>
+<span class="sourceLineNo">165</span>    }<a name="line.165"></a>
+<span class="sourceLineNo">166</span>    return comps;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>  }<a name="line.167"></a>
+<span class="sourceLineNo">168</span><a name="line.168"></a>
+<span class="sourceLineNo">169</span>  public static void main(String[] args) {<a name="line.169"></a>
+<span class="sourceLineNo">170</span>    writeTo(System.out);<a name="line.170"></a>
+<span class="sourceLineNo">171</span>  }<a name="line.171"></a>
+<span class="sourceLineNo">172</span>}<a name="line.172"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/book.html
----------------------------------------------------------------------
diff --git a/book.html b/book.html
index ed082c7..4212d1d 100644
--- a/book.html
+++ b/book.html
@@ -3082,6 +3082,21 @@ Some configurations would only appear in source code; the only way to identify t
 </dd>
 </dl>
 </div>
+<div id="hbase.normalizer.min.region.count" class="dlist">
+<dl>
+<dt class="hdlist1"><code>hbase.normalizer.min.region.count</code></dt>
+<dd>
+<div class="paragraph">
+<div class="title">Description</div>
+<p>configure the minimum number of regions</p>
+</div>
+<div class="paragraph">
+<div class="title">Default</div>
+<p><code>3</code></p>
+</div>
+</dd>
+</dl>
+</div>
 <div id="hbase.regions.slop" class="dlist">
 <dl>
 <dt class="hdlist1"><code>hbase.regions.slop</code></dt>
@@ -37715,7 +37730,7 @@ The server will return cellblocks compressed using this same compressor as long
 <div id="footer">
 <div id="footer-text">
 Version 3.0.0-SNAPSHOT<br>
-Last updated 2018-04-18 14:29:52 UTC
+Last updated 2018-04-19 14:29:51 UTC
 </div>
 </div>
 </body>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/bulk-loads.html
----------------------------------------------------------------------
diff --git a/bulk-loads.html b/bulk-loads.html
index 3d3f156..ef792c5 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180418" />
+    <meta name="Date-Revision-yyyymmdd" content="20180419" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Bulk Loads in Apache HBase (TM)
@@ -306,7 +306,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-04-18</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-04-19</li>
             </p>
                 </div>
 


[28/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
index c938b47..9267f53 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
@@ -30,205 +30,209 @@
 <span class="sourceLineNo">022</span>import java.util.Collections;<a name="line.22"></a>
 <span class="sourceLineNo">023</span>import java.util.Comparator;<a name="line.23"></a>
 <span class="sourceLineNo">024</span>import java.util.List;<a name="line.24"></a>
-<span class="sourceLineNo">025</span>import org.apache.hadoop.hbase.HBaseIOException;<a name="line.25"></a>
-<span class="sourceLineNo">026</span>import org.apache.hadoop.hbase.RegionMetrics;<a name="line.26"></a>
-<span class="sourceLineNo">027</span>import org.apache.hadoop.hbase.ServerName;<a name="line.27"></a>
-<span class="sourceLineNo">028</span>import org.apache.hadoop.hbase.Size;<a name="line.28"></a>
-<span class="sourceLineNo">029</span>import org.apache.hadoop.hbase.TableName;<a name="line.29"></a>
-<span class="sourceLineNo">030</span>import org.apache.hadoop.hbase.client.MasterSwitchType;<a name="line.30"></a>
-<span class="sourceLineNo">031</span>import org.apache.hadoop.hbase.client.RegionInfo;<a name="line.31"></a>
-<span class="sourceLineNo">032</span>import org.apache.hadoop.hbase.master.MasterRpcServices;<a name="line.32"></a>
-<span class="sourceLineNo">033</span>import org.apache.hadoop.hbase.master.MasterServices;<a name="line.33"></a>
-<span class="sourceLineNo">034</span>import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;<a name="line.34"></a>
-<span class="sourceLineNo">035</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.35"></a>
-<span class="sourceLineNo">036</span>import org.slf4j.Logger;<a name="line.36"></a>
-<span class="sourceLineNo">037</span>import org.slf4j.LoggerFactory;<a name="line.37"></a>
-<span class="sourceLineNo">038</span><a name="line.38"></a>
-<span class="sourceLineNo">039</span>import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;<a name="line.39"></a>
-<span class="sourceLineNo">040</span><a name="line.40"></a>
-<span class="sourceLineNo">041</span>/**<a name="line.41"></a>
-<span class="sourceLineNo">042</span> * Simple implementation of region normalizer.<a name="line.42"></a>
-<span class="sourceLineNo">043</span> *<a name="line.43"></a>
-<span class="sourceLineNo">044</span> * Logic in use:<a name="line.44"></a>
-<span class="sourceLineNo">045</span> *<a name="line.45"></a>
-<span class="sourceLineNo">046</span> *  &lt;ol&gt;<a name="line.46"></a>
-<span class="sourceLineNo">047</span> *  &lt;li&gt; Get all regions of a given table<a name="line.47"></a>
-<span class="sourceLineNo">048</span> *  &lt;li&gt; Get avg size S of each region (by total size of store files reported in RegionMetrics)<a name="line.48"></a>
-<span class="sourceLineNo">049</span> *  &lt;li&gt; Seek every single region one by one. If a region R0 is bigger than S * 2, it is<a name="line.49"></a>
-<span class="sourceLineNo">050</span> *  kindly requested to split. Thereon evaluate the next region R1<a name="line.50"></a>
-<span class="sourceLineNo">051</span> *  &lt;li&gt; Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.<a name="line.51"></a>
-<span class="sourceLineNo">052</span> *  Thereon evaluate the next region R2<a name="line.52"></a>
-<span class="sourceLineNo">053</span> *  &lt;li&gt; Otherwise, R1 is evaluated<a name="line.53"></a>
-<span class="sourceLineNo">054</span> * &lt;/ol&gt;<a name="line.54"></a>
-<span class="sourceLineNo">055</span> * &lt;p&gt;<a name="line.55"></a>
-<span class="sourceLineNo">056</span> * Region sizes are coarse and approximate on the order of megabytes. Additionally,<a name="line.56"></a>
-<span class="sourceLineNo">057</span> * "empty" regions (less than 1MB, with the previous note) are not merged away. This<a name="line.57"></a>
-<span class="sourceLineNo">058</span> * is by design to prevent normalization from undoing the pre-splitting of a table.<a name="line.58"></a>
-<span class="sourceLineNo">059</span> */<a name="line.59"></a>
-<span class="sourceLineNo">060</span>@InterfaceAudience.Private<a name="line.60"></a>
-<span class="sourceLineNo">061</span>public class SimpleRegionNormalizer implements RegionNormalizer {<a name="line.61"></a>
-<span class="sourceLineNo">062</span><a name="line.62"></a>
-<span class="sourceLineNo">063</span>  private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class);<a name="line.63"></a>
-<span class="sourceLineNo">064</span>  private static final int MIN_REGION_COUNT = 3;<a name="line.64"></a>
-<span class="sourceLineNo">065</span>  private MasterServices masterServices;<a name="line.65"></a>
-<span class="sourceLineNo">066</span>  private MasterRpcServices masterRpcServices;<a name="line.66"></a>
-<span class="sourceLineNo">067</span>  private static long[] skippedCount = new long[NormalizationPlan.PlanType.values().length];<a name="line.67"></a>
-<span class="sourceLineNo">068</span><a name="line.68"></a>
-<span class="sourceLineNo">069</span>  /**<a name="line.69"></a>
-<span class="sourceLineNo">070</span>   * Set the master service.<a name="line.70"></a>
-<span class="sourceLineNo">071</span>   * @param masterServices inject instance of MasterServices<a name="line.71"></a>
-<span class="sourceLineNo">072</span>   */<a name="line.72"></a>
-<span class="sourceLineNo">073</span>  @Override<a name="line.73"></a>
-<span class="sourceLineNo">074</span>  public void setMasterServices(MasterServices masterServices) {<a name="line.74"></a>
-<span class="sourceLineNo">075</span>    this.masterServices = masterServices;<a name="line.75"></a>
-<span class="sourceLineNo">076</span>  }<a name="line.76"></a>
-<span class="sourceLineNo">077</span><a name="line.77"></a>
-<span class="sourceLineNo">078</span>  @Override<a name="line.78"></a>
-<span class="sourceLineNo">079</span>  public void setMasterRpcServices(MasterRpcServices masterRpcServices) {<a name="line.79"></a>
-<span class="sourceLineNo">080</span>    this.masterRpcServices = masterRpcServices;<a name="line.80"></a>
-<span class="sourceLineNo">081</span>  }<a name="line.81"></a>
-<span class="sourceLineNo">082</span><a name="line.82"></a>
-<span class="sourceLineNo">083</span>  @Override<a name="line.83"></a>
-<span class="sourceLineNo">084</span>  public void planSkipped(RegionInfo hri, PlanType type) {<a name="line.84"></a>
-<span class="sourceLineNo">085</span>    skippedCount[type.ordinal()]++;<a name="line.85"></a>
-<span class="sourceLineNo">086</span>  }<a name="line.86"></a>
-<span class="sourceLineNo">087</span><a name="line.87"></a>
-<span class="sourceLineNo">088</span>  @Override<a name="line.88"></a>
-<span class="sourceLineNo">089</span>  public long getSkippedCount(NormalizationPlan.PlanType type) {<a name="line.89"></a>
-<span class="sourceLineNo">090</span>    return skippedCount[type.ordinal()];<a name="line.90"></a>
-<span class="sourceLineNo">091</span>  }<a name="line.91"></a>
-<span class="sourceLineNo">092</span><a name="line.92"></a>
-<span class="sourceLineNo">093</span>  /**<a name="line.93"></a>
-<span class="sourceLineNo">094</span>   * Comparator class that gives higher priority to region Split plan.<a name="line.94"></a>
-<span class="sourceLineNo">095</span>   */<a name="line.95"></a>
-<span class="sourceLineNo">096</span>  static class PlanComparator implements Comparator&lt;NormalizationPlan&gt; {<a name="line.96"></a>
-<span class="sourceLineNo">097</span>    @Override<a name="line.97"></a>
-<span class="sourceLineNo">098</span>    public int compare(NormalizationPlan plan1, NormalizationPlan plan2) {<a name="line.98"></a>
-<span class="sourceLineNo">099</span>      boolean plan1IsSplit = plan1 instanceof SplitNormalizationPlan;<a name="line.99"></a>
-<span class="sourceLineNo">100</span>      boolean plan2IsSplit = plan2 instanceof SplitNormalizationPlan;<a name="line.100"></a>
-<span class="sourceLineNo">101</span>      if (plan1IsSplit &amp;&amp; plan2IsSplit) {<a name="line.101"></a>
-<span class="sourceLineNo">102</span>        return 0;<a name="line.102"></a>
-<span class="sourceLineNo">103</span>      } else if (plan1IsSplit) {<a name="line.103"></a>
-<span class="sourceLineNo">104</span>        return -1;<a name="line.104"></a>
-<span class="sourceLineNo">105</span>      } else if (plan2IsSplit) {<a name="line.105"></a>
-<span class="sourceLineNo">106</span>        return 1;<a name="line.106"></a>
-<span class="sourceLineNo">107</span>      } else {<a name="line.107"></a>
-<span class="sourceLineNo">108</span>        return 0;<a name="line.108"></a>
-<span class="sourceLineNo">109</span>      }<a name="line.109"></a>
-<span class="sourceLineNo">110</span>    }<a name="line.110"></a>
-<span class="sourceLineNo">111</span>  }<a name="line.111"></a>
-<span class="sourceLineNo">112</span><a name="line.112"></a>
-<span class="sourceLineNo">113</span>  private Comparator&lt;NormalizationPlan&gt; planComparator = new PlanComparator();<a name="line.113"></a>
-<span class="sourceLineNo">114</span><a name="line.114"></a>
-<span class="sourceLineNo">115</span>  /**<a name="line.115"></a>
-<span class="sourceLineNo">116</span>   * Computes next most "urgent" normalization action on the table.<a name="line.116"></a>
-<span class="sourceLineNo">117</span>   * Action may be either a split, or a merge, or no action.<a name="line.117"></a>
-<span class="sourceLineNo">118</span>   *<a name="line.118"></a>
-<span class="sourceLineNo">119</span>   * @param table table to normalize<a name="line.119"></a>
-<span class="sourceLineNo">120</span>   * @return normalization plan to execute<a name="line.120"></a>
-<span class="sourceLineNo">121</span>   */<a name="line.121"></a>
-<span class="sourceLineNo">122</span>  @Override<a name="line.122"></a>
-<span class="sourceLineNo">123</span>  public List&lt;NormalizationPlan&gt; computePlanForTable(TableName table) throws HBaseIOException {<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    if (table == null || table.isSystemTable()) {<a name="line.124"></a>
-<span class="sourceLineNo">125</span>      LOG.debug("Normalization of system table " + table + " isn't allowed");<a name="line.125"></a>
-<span class="sourceLineNo">126</span>      return null;<a name="line.126"></a>
-<span class="sourceLineNo">127</span>    }<a name="line.127"></a>
-<span class="sourceLineNo">128</span><a name="line.128"></a>
-<span class="sourceLineNo">129</span>    List&lt;NormalizationPlan&gt; plans = new ArrayList&lt;&gt;();<a name="line.129"></a>
-<span class="sourceLineNo">130</span>    List&lt;RegionInfo&gt; tableRegions = masterServices.getAssignmentManager().getRegionStates().<a name="line.130"></a>
-<span class="sourceLineNo">131</span>      getRegionsOfTable(table);<a name="line.131"></a>
+<span class="sourceLineNo">025</span>import org.apache.hadoop.hbase.HBaseConfiguration;<a name="line.25"></a>
+<span class="sourceLineNo">026</span>import org.apache.hadoop.hbase.HBaseIOException;<a name="line.26"></a>
+<span class="sourceLineNo">027</span>import org.apache.hadoop.hbase.RegionMetrics;<a name="line.27"></a>
+<span class="sourceLineNo">028</span>import org.apache.hadoop.hbase.ServerName;<a name="line.28"></a>
+<span class="sourceLineNo">029</span>import org.apache.hadoop.hbase.Size;<a name="line.29"></a>
+<span class="sourceLineNo">030</span>import org.apache.hadoop.hbase.TableName;<a name="line.30"></a>
+<span class="sourceLineNo">031</span>import org.apache.hadoop.hbase.client.MasterSwitchType;<a name="line.31"></a>
+<span class="sourceLineNo">032</span>import org.apache.hadoop.hbase.client.RegionInfo;<a name="line.32"></a>
+<span class="sourceLineNo">033</span>import org.apache.hadoop.hbase.master.MasterRpcServices;<a name="line.33"></a>
+<span class="sourceLineNo">034</span>import org.apache.hadoop.hbase.master.MasterServices;<a name="line.34"></a>
+<span class="sourceLineNo">035</span>import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;<a name="line.35"></a>
+<span class="sourceLineNo">036</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.36"></a>
+<span class="sourceLineNo">037</span>import org.slf4j.Logger;<a name="line.37"></a>
+<span class="sourceLineNo">038</span>import org.slf4j.LoggerFactory;<a name="line.38"></a>
+<span class="sourceLineNo">039</span><a name="line.39"></a>
+<span class="sourceLineNo">040</span>import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;<a name="line.40"></a>
+<span class="sourceLineNo">041</span><a name="line.41"></a>
+<span class="sourceLineNo">042</span>/**<a name="line.42"></a>
+<span class="sourceLineNo">043</span> * Simple implementation of region normalizer.<a name="line.43"></a>
+<span class="sourceLineNo">044</span> *<a name="line.44"></a>
+<span class="sourceLineNo">045</span> * Logic in use:<a name="line.45"></a>
+<span class="sourceLineNo">046</span> *<a name="line.46"></a>
+<span class="sourceLineNo">047</span> *  &lt;ol&gt;<a name="line.47"></a>
+<span class="sourceLineNo">048</span> *  &lt;li&gt; Get all regions of a given table<a name="line.48"></a>
+<span class="sourceLineNo">049</span> *  &lt;li&gt; Get avg size S of each region (by total size of store files reported in RegionMetrics)<a name="line.49"></a>
+<span class="sourceLineNo">050</span> *  &lt;li&gt; Seek every single region one by one. If a region R0 is bigger than S * 2, it is<a name="line.50"></a>
+<span class="sourceLineNo">051</span> *  kindly requested to split. Thereon evaluate the next region R1<a name="line.51"></a>
+<span class="sourceLineNo">052</span> *  &lt;li&gt; Otherwise, if R0 + R1 is smaller than S, R0 and R1 are kindly requested to merge.<a name="line.52"></a>
+<span class="sourceLineNo">053</span> *  Thereon evaluate the next region R2<a name="line.53"></a>
+<span class="sourceLineNo">054</span> *  &lt;li&gt; Otherwise, R1 is evaluated<a name="line.54"></a>
+<span class="sourceLineNo">055</span> * &lt;/ol&gt;<a name="line.55"></a>
+<span class="sourceLineNo">056</span> * &lt;p&gt;<a name="line.56"></a>
+<span class="sourceLineNo">057</span> * Region sizes are coarse and approximate on the order of megabytes. Additionally,<a name="line.57"></a>
+<span class="sourceLineNo">058</span> * "empty" regions (less than 1MB, with the previous note) are not merged away. This<a name="line.58"></a>
+<span class="sourceLineNo">059</span> * is by design to prevent normalization from undoing the pre-splitting of a table.<a name="line.59"></a>
+<span class="sourceLineNo">060</span> */<a name="line.60"></a>
+<span class="sourceLineNo">061</span>@InterfaceAudience.Private<a name="line.61"></a>
+<span class="sourceLineNo">062</span>public class SimpleRegionNormalizer implements RegionNormalizer {<a name="line.62"></a>
+<span class="sourceLineNo">063</span><a name="line.63"></a>
+<span class="sourceLineNo">064</span>  private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class);<a name="line.64"></a>
+<span class="sourceLineNo">065</span>  private int minRegionCount;<a name="line.65"></a>
+<span class="sourceLineNo">066</span>  private MasterServices masterServices;<a name="line.66"></a>
+<span class="sourceLineNo">067</span>  private MasterRpcServices masterRpcServices;<a name="line.67"></a>
+<span class="sourceLineNo">068</span>  private static long[] skippedCount = new long[NormalizationPlan.PlanType.values().length];<a name="line.68"></a>
+<span class="sourceLineNo">069</span><a name="line.69"></a>
+<span class="sourceLineNo">070</span>  public SimpleRegionNormalizer() {<a name="line.70"></a>
+<span class="sourceLineNo">071</span>    minRegionCount = HBaseConfiguration.create().getInt("hbase.normalizer.min.region.count", 3);<a name="line.71"></a>
+<span class="sourceLineNo">072</span>  }<a name="line.72"></a>
+<span class="sourceLineNo">073</span>  /**<a name="line.73"></a>
+<span class="sourceLineNo">074</span>   * Set the master service.<a name="line.74"></a>
+<span class="sourceLineNo">075</span>   * @param masterServices inject instance of MasterServices<a name="line.75"></a>
+<span class="sourceLineNo">076</span>   */<a name="line.76"></a>
+<span class="sourceLineNo">077</span>  @Override<a name="line.77"></a>
+<span class="sourceLineNo">078</span>  public void setMasterServices(MasterServices masterServices) {<a name="line.78"></a>
+<span class="sourceLineNo">079</span>    this.masterServices = masterServices;<a name="line.79"></a>
+<span class="sourceLineNo">080</span>  }<a name="line.80"></a>
+<span class="sourceLineNo">081</span><a name="line.81"></a>
+<span class="sourceLineNo">082</span>  @Override<a name="line.82"></a>
+<span class="sourceLineNo">083</span>  public void setMasterRpcServices(MasterRpcServices masterRpcServices) {<a name="line.83"></a>
+<span class="sourceLineNo">084</span>    this.masterRpcServices = masterRpcServices;<a name="line.84"></a>
+<span class="sourceLineNo">085</span>  }<a name="line.85"></a>
+<span class="sourceLineNo">086</span><a name="line.86"></a>
+<span class="sourceLineNo">087</span>  @Override<a name="line.87"></a>
+<span class="sourceLineNo">088</span>  public void planSkipped(RegionInfo hri, PlanType type) {<a name="line.88"></a>
+<span class="sourceLineNo">089</span>    skippedCount[type.ordinal()]++;<a name="line.89"></a>
+<span class="sourceLineNo">090</span>  }<a name="line.90"></a>
+<span class="sourceLineNo">091</span><a name="line.91"></a>
+<span class="sourceLineNo">092</span>  @Override<a name="line.92"></a>
+<span class="sourceLineNo">093</span>  public long getSkippedCount(NormalizationPlan.PlanType type) {<a name="line.93"></a>
+<span class="sourceLineNo">094</span>    return skippedCount[type.ordinal()];<a name="line.94"></a>
+<span class="sourceLineNo">095</span>  }<a name="line.95"></a>
+<span class="sourceLineNo">096</span><a name="line.96"></a>
+<span class="sourceLineNo">097</span>  /**<a name="line.97"></a>
+<span class="sourceLineNo">098</span>   * Comparator class that gives higher priority to region Split plan.<a name="line.98"></a>
+<span class="sourceLineNo">099</span>   */<a name="line.99"></a>
+<span class="sourceLineNo">100</span>  static class PlanComparator implements Comparator&lt;NormalizationPlan&gt; {<a name="line.100"></a>
+<span class="sourceLineNo">101</span>    @Override<a name="line.101"></a>
+<span class="sourceLineNo">102</span>    public int compare(NormalizationPlan plan1, NormalizationPlan plan2) {<a name="line.102"></a>
+<span class="sourceLineNo">103</span>      boolean plan1IsSplit = plan1 instanceof SplitNormalizationPlan;<a name="line.103"></a>
+<span class="sourceLineNo">104</span>      boolean plan2IsSplit = plan2 instanceof SplitNormalizationPlan;<a name="line.104"></a>
+<span class="sourceLineNo">105</span>      if (plan1IsSplit &amp;&amp; plan2IsSplit) {<a name="line.105"></a>
+<span class="sourceLineNo">106</span>        return 0;<a name="line.106"></a>
+<span class="sourceLineNo">107</span>      } else if (plan1IsSplit) {<a name="line.107"></a>
+<span class="sourceLineNo">108</span>        return -1;<a name="line.108"></a>
+<span class="sourceLineNo">109</span>      } else if (plan2IsSplit) {<a name="line.109"></a>
+<span class="sourceLineNo">110</span>        return 1;<a name="line.110"></a>
+<span class="sourceLineNo">111</span>      } else {<a name="line.111"></a>
+<span class="sourceLineNo">112</span>        return 0;<a name="line.112"></a>
+<span class="sourceLineNo">113</span>      }<a name="line.113"></a>
+<span class="sourceLineNo">114</span>    }<a name="line.114"></a>
+<span class="sourceLineNo">115</span>  }<a name="line.115"></a>
+<span class="sourceLineNo">116</span><a name="line.116"></a>
+<span class="sourceLineNo">117</span>  private Comparator&lt;NormalizationPlan&gt; planComparator = new PlanComparator();<a name="line.117"></a>
+<span class="sourceLineNo">118</span><a name="line.118"></a>
+<span class="sourceLineNo">119</span>  /**<a name="line.119"></a>
+<span class="sourceLineNo">120</span>   * Computes next most "urgent" normalization action on the table.<a name="line.120"></a>
+<span class="sourceLineNo">121</span>   * Action may be either a split, or a merge, or no action.<a name="line.121"></a>
+<span class="sourceLineNo">122</span>   *<a name="line.122"></a>
+<span class="sourceLineNo">123</span>   * @param table table to normalize<a name="line.123"></a>
+<span class="sourceLineNo">124</span>   * @return normalization plan to execute<a name="line.124"></a>
+<span class="sourceLineNo">125</span>   */<a name="line.125"></a>
+<span class="sourceLineNo">126</span>  @Override<a name="line.126"></a>
+<span class="sourceLineNo">127</span>  public List&lt;NormalizationPlan&gt; computePlanForTable(TableName table) throws HBaseIOException {<a name="line.127"></a>
+<span class="sourceLineNo">128</span>    if (table == null || table.isSystemTable()) {<a name="line.128"></a>
+<span class="sourceLineNo">129</span>      LOG.debug("Normalization of system table " + table + " isn't allowed");<a name="line.129"></a>
+<span class="sourceLineNo">130</span>      return null;<a name="line.130"></a>
+<span class="sourceLineNo">131</span>    }<a name="line.131"></a>
 <span class="sourceLineNo">132</span><a name="line.132"></a>
-<span class="sourceLineNo">133</span>    //TODO: should we make min number of regions a config param?<a name="line.133"></a>
-<span class="sourceLineNo">134</span>    if (tableRegions == null || tableRegions.size() &lt; MIN_REGION_COUNT) {<a name="line.134"></a>
-<span class="sourceLineNo">135</span>      int nrRegions = tableRegions == null ? 0 : tableRegions.size();<a name="line.135"></a>
-<span class="sourceLineNo">136</span>      LOG.debug("Table " + table + " has " + nrRegions + " regions, required min number"<a name="line.136"></a>
-<span class="sourceLineNo">137</span>        + " of regions for normalizer to run is " + MIN_REGION_COUNT + ", not running normalizer");<a name="line.137"></a>
-<span class="sourceLineNo">138</span>      return null;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>    }<a name="line.139"></a>
-<span class="sourceLineNo">140</span><a name="line.140"></a>
-<span class="sourceLineNo">141</span>    LOG.debug("Computing normalization plan for table: " + table +<a name="line.141"></a>
-<span class="sourceLineNo">142</span>      ", number of regions: " + tableRegions.size());<a name="line.142"></a>
-<span class="sourceLineNo">143</span><a name="line.143"></a>
-<span class="sourceLineNo">144</span>    long totalSizeMb = 0;<a name="line.144"></a>
-<span class="sourceLineNo">145</span>    int acutalRegionCnt = 0;<a name="line.145"></a>
-<span class="sourceLineNo">146</span><a name="line.146"></a>
-<span class="sourceLineNo">147</span>    for (int i = 0; i &lt; tableRegions.size(); i++) {<a name="line.147"></a>
-<span class="sourceLineNo">148</span>      RegionInfo hri = tableRegions.get(i);<a name="line.148"></a>
-<span class="sourceLineNo">149</span>      long regionSize = getRegionSize(hri);<a name="line.149"></a>
-<span class="sourceLineNo">150</span>      if (regionSize &gt; 0) {<a name="line.150"></a>
-<span class="sourceLineNo">151</span>        acutalRegionCnt++;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>        totalSizeMb += regionSize;<a name="line.152"></a>
-<span class="sourceLineNo">153</span>      }<a name="line.153"></a>
-<span class="sourceLineNo">154</span>    }<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>    double avgRegionSize = acutalRegionCnt == 0 ? 0 : totalSizeMb / (double) acutalRegionCnt;<a name="line.156"></a>
-<span class="sourceLineNo">157</span><a name="line.157"></a>
-<span class="sourceLineNo">158</span>    LOG.debug("Table " + table + ", total aggregated regions size: " + totalSizeMb);<a name="line.158"></a>
-<span class="sourceLineNo">159</span>    LOG.debug("Table " + table + ", average region size: " + avgRegionSize);<a name="line.159"></a>
-<span class="sourceLineNo">160</span><a name="line.160"></a>
-<span class="sourceLineNo">161</span>    int candidateIdx = 0;<a name="line.161"></a>
-<span class="sourceLineNo">162</span>    boolean splitEnabled = true, mergeEnabled = true;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>    try {<a name="line.163"></a>
-<span class="sourceLineNo">164</span>      splitEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.164"></a>
-<span class="sourceLineNo">165</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled();<a name="line.165"></a>
-<span class="sourceLineNo">166</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.166"></a>
-<span class="sourceLineNo">167</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.167"></a>
-<span class="sourceLineNo">168</span>    }<a name="line.168"></a>
-<span class="sourceLineNo">169</span>    try {<a name="line.169"></a>
-<span class="sourceLineNo">170</span>      mergeEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.170"></a>
-<span class="sourceLineNo">171</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled();<a name="line.171"></a>
-<span class="sourceLineNo">172</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.172"></a>
-<span class="sourceLineNo">173</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.173"></a>
-<span class="sourceLineNo">174</span>    }<a name="line.174"></a>
-<span class="sourceLineNo">175</span>    while (candidateIdx &lt; tableRegions.size()) {<a name="line.175"></a>
-<span class="sourceLineNo">176</span>      RegionInfo hri = tableRegions.get(candidateIdx);<a name="line.176"></a>
-<span class="sourceLineNo">177</span>      long regionSize = getRegionSize(hri);<a name="line.177"></a>
-<span class="sourceLineNo">178</span>      // if the region is &gt; 2 times larger than average, we split it, split<a name="line.178"></a>
-<span class="sourceLineNo">179</span>      // is more high priority normalization action than merge.<a name="line.179"></a>
-<span class="sourceLineNo">180</span>      if (regionSize &gt; 2 * avgRegionSize) {<a name="line.180"></a>
-<span class="sourceLineNo">181</span>        if (splitEnabled) {<a name="line.181"></a>
-<span class="sourceLineNo">182</span>          LOG.info("Table " + table + ", large region " + hri.getRegionNameAsString() + " has size "<a name="line.182"></a>
-<span class="sourceLineNo">183</span>              + regionSize + ", more than twice avg size, splitting");<a name="line.183"></a>
-<span class="sourceLineNo">184</span>          plans.add(new SplitNormalizationPlan(hri, null));<a name="line.184"></a>
-<span class="sourceLineNo">185</span>        }<a name="line.185"></a>
-<span class="sourceLineNo">186</span>      } else {<a name="line.186"></a>
-<span class="sourceLineNo">187</span>        if (candidateIdx == tableRegions.size()-1) {<a name="line.187"></a>
-<span class="sourceLineNo">188</span>          break;<a name="line.188"></a>
+<span class="sourceLineNo">133</span>    List&lt;NormalizationPlan&gt; plans = new ArrayList&lt;&gt;();<a name="line.133"></a>
+<span class="sourceLineNo">134</span>    List&lt;RegionInfo&gt; tableRegions = masterServices.getAssignmentManager().getRegionStates().<a name="line.134"></a>
+<span class="sourceLineNo">135</span>      getRegionsOfTable(table);<a name="line.135"></a>
+<span class="sourceLineNo">136</span><a name="line.136"></a>
+<span class="sourceLineNo">137</span>    //TODO: should we make min number of regions a config param?<a name="line.137"></a>
+<span class="sourceLineNo">138</span>    if (tableRegions == null || tableRegions.size() &lt; minRegionCount) {<a name="line.138"></a>
+<span class="sourceLineNo">139</span>      int nrRegions = tableRegions == null ? 0 : tableRegions.size();<a name="line.139"></a>
+<span class="sourceLineNo">140</span>      LOG.debug("Table " + table + " has " + nrRegions + " regions, required min number"<a name="line.140"></a>
+<span class="sourceLineNo">141</span>        + " of regions for normalizer to run is " + minRegionCount + ", not running normalizer");<a name="line.141"></a>
+<span class="sourceLineNo">142</span>      return null;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    }<a name="line.143"></a>
+<span class="sourceLineNo">144</span><a name="line.144"></a>
+<span class="sourceLineNo">145</span>    LOG.debug("Computing normalization plan for table: " + table +<a name="line.145"></a>
+<span class="sourceLineNo">146</span>      ", number of regions: " + tableRegions.size());<a name="line.146"></a>
+<span class="sourceLineNo">147</span><a name="line.147"></a>
+<span class="sourceLineNo">148</span>    long totalSizeMb = 0;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>    int acutalRegionCnt = 0;<a name="line.149"></a>
+<span class="sourceLineNo">150</span><a name="line.150"></a>
+<span class="sourceLineNo">151</span>    for (int i = 0; i &lt; tableRegions.size(); i++) {<a name="line.151"></a>
+<span class="sourceLineNo">152</span>      RegionInfo hri = tableRegions.get(i);<a name="line.152"></a>
+<span class="sourceLineNo">153</span>      long regionSize = getRegionSize(hri);<a name="line.153"></a>
+<span class="sourceLineNo">154</span>      if (regionSize &gt; 0) {<a name="line.154"></a>
+<span class="sourceLineNo">155</span>        acutalRegionCnt++;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>        totalSizeMb += regionSize;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>      }<a name="line.157"></a>
+<span class="sourceLineNo">158</span>    }<a name="line.158"></a>
+<span class="sourceLineNo">159</span><a name="line.159"></a>
+<span class="sourceLineNo">160</span>    double avgRegionSize = acutalRegionCnt == 0 ? 0 : totalSizeMb / (double) acutalRegionCnt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span><a name="line.161"></a>
+<span class="sourceLineNo">162</span>    LOG.debug("Table " + table + ", total aggregated regions size: " + totalSizeMb);<a name="line.162"></a>
+<span class="sourceLineNo">163</span>    LOG.debug("Table " + table + ", average region size: " + avgRegionSize);<a name="line.163"></a>
+<span class="sourceLineNo">164</span><a name="line.164"></a>
+<span class="sourceLineNo">165</span>    int candidateIdx = 0;<a name="line.165"></a>
+<span class="sourceLineNo">166</span>    boolean splitEnabled = true, mergeEnabled = true;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>    try {<a name="line.167"></a>
+<span class="sourceLineNo">168</span>      splitEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.168"></a>
+<span class="sourceLineNo">169</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled();<a name="line.169"></a>
+<span class="sourceLineNo">170</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.170"></a>
+<span class="sourceLineNo">171</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.171"></a>
+<span class="sourceLineNo">172</span>    }<a name="line.172"></a>
+<span class="sourceLineNo">173</span>    try {<a name="line.173"></a>
+<span class="sourceLineNo">174</span>      mergeEnabled = masterRpcServices.isSplitOrMergeEnabled(null,<a name="line.174"></a>
+<span class="sourceLineNo">175</span>        RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled();<a name="line.175"></a>
+<span class="sourceLineNo">176</span>    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException e) {<a name="line.176"></a>
+<span class="sourceLineNo">177</span>      LOG.debug("Unable to determine whether split is enabled", e);<a name="line.177"></a>
+<span class="sourceLineNo">178</span>    }<a name="line.178"></a>
+<span class="sourceLineNo">179</span>    while (candidateIdx &lt; tableRegions.size()) {<a name="line.179"></a>
+<span class="sourceLineNo">180</span>      RegionInfo hri = tableRegions.get(candidateIdx);<a name="line.180"></a>
+<span class="sourceLineNo">181</span>      long regionSize = getRegionSize(hri);<a name="line.181"></a>
+<span class="sourceLineNo">182</span>      // if the region is &gt; 2 times larger than average, we split it, split<a name="line.182"></a>
+<span class="sourceLineNo">183</span>      // is more high priority normalization action than merge.<a name="line.183"></a>
+<span class="sourceLineNo">184</span>      if (regionSize &gt; 2 * avgRegionSize) {<a name="line.184"></a>
+<span class="sourceLineNo">185</span>        if (splitEnabled) {<a name="line.185"></a>
+<span class="sourceLineNo">186</span>          LOG.info("Table " + table + ", large region " + hri.getRegionNameAsString() + " has size "<a name="line.186"></a>
+<span class="sourceLineNo">187</span>              + regionSize + ", more than twice avg size, splitting");<a name="line.187"></a>
+<span class="sourceLineNo">188</span>          plans.add(new SplitNormalizationPlan(hri, null));<a name="line.188"></a>
 <span class="sourceLineNo">189</span>        }<a name="line.189"></a>
-<span class="sourceLineNo">190</span>        if (mergeEnabled) {<a name="line.190"></a>
-<span class="sourceLineNo">191</span>          RegionInfo hri2 = tableRegions.get(candidateIdx+1);<a name="line.191"></a>
-<span class="sourceLineNo">192</span>          long regionSize2 = getRegionSize(hri2);<a name="line.192"></a>
-<span class="sourceLineNo">193</span>          if (regionSize &gt;= 0 &amp;&amp; regionSize2 &gt;= 0 &amp;&amp; regionSize + regionSize2 &lt; avgRegionSize) {<a name="line.193"></a>
-<span class="sourceLineNo">194</span>            LOG.info("Table " + table + ", small region size: " + regionSize<a name="line.194"></a>
-<span class="sourceLineNo">195</span>              + " plus its neighbor size: " + regionSize2<a name="line.195"></a>
-<span class="sourceLineNo">196</span>              + ", less than the avg size " + avgRegionSize + ", merging them");<a name="line.196"></a>
-<span class="sourceLineNo">197</span>            plans.add(new MergeNormalizationPlan(hri, hri2));<a name="line.197"></a>
-<span class="sourceLineNo">198</span>            candidateIdx++;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>          }<a name="line.199"></a>
-<span class="sourceLineNo">200</span>        }<a name="line.200"></a>
-<span class="sourceLineNo">201</span>      }<a name="line.201"></a>
-<span class="sourceLineNo">202</span>      candidateIdx++;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>    }<a name="line.203"></a>
-<span class="sourceLineNo">204</span>    if (plans.isEmpty()) {<a name="line.204"></a>
-<span class="sourceLineNo">205</span>      LOG.debug("No normalization needed, regions look good for table: " + table);<a name="line.205"></a>
-<span class="sourceLineNo">206</span>      return null;<a name="line.206"></a>
+<span class="sourceLineNo">190</span>      } else {<a name="line.190"></a>
+<span class="sourceLineNo">191</span>        if (candidateIdx == tableRegions.size()-1) {<a name="line.191"></a>
+<span class="sourceLineNo">192</span>          break;<a name="line.192"></a>
+<span class="sourceLineNo">193</span>        }<a name="line.193"></a>
+<span class="sourceLineNo">194</span>        if (mergeEnabled) {<a name="line.194"></a>
+<span class="sourceLineNo">195</span>          RegionInfo hri2 = tableRegions.get(candidateIdx+1);<a name="line.195"></a>
+<span class="sourceLineNo">196</span>          long regionSize2 = getRegionSize(hri2);<a name="line.196"></a>
+<span class="sourceLineNo">197</span>          if (regionSize &gt;= 0 &amp;&amp; regionSize2 &gt;= 0 &amp;&amp; regionSize + regionSize2 &lt; avgRegionSize) {<a name="line.197"></a>
+<span class="sourceLineNo">198</span>            LOG.info("Table " + table + ", small region size: " + regionSize<a name="line.198"></a>
+<span class="sourceLineNo">199</span>              + " plus its neighbor size: " + regionSize2<a name="line.199"></a>
+<span class="sourceLineNo">200</span>              + ", less than the avg size " + avgRegionSize + ", merging them");<a name="line.200"></a>
+<span class="sourceLineNo">201</span>            plans.add(new MergeNormalizationPlan(hri, hri2));<a name="line.201"></a>
+<span class="sourceLineNo">202</span>            candidateIdx++;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>          }<a name="line.203"></a>
+<span class="sourceLineNo">204</span>        }<a name="line.204"></a>
+<span class="sourceLineNo">205</span>      }<a name="line.205"></a>
+<span class="sourceLineNo">206</span>      candidateIdx++;<a name="line.206"></a>
 <span class="sourceLineNo">207</span>    }<a name="line.207"></a>
-<span class="sourceLineNo">208</span>    Collections.sort(plans, planComparator);<a name="line.208"></a>
-<span class="sourceLineNo">209</span>    return plans;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  }<a name="line.210"></a>
-<span class="sourceLineNo">211</span><a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private long getRegionSize(RegionInfo hri) {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>    ServerName sn = masterServices.getAssignmentManager().getRegionStates().<a name="line.213"></a>
-<span class="sourceLineNo">214</span>      getRegionServerOfRegion(hri);<a name="line.214"></a>
-<span class="sourceLineNo">215</span>    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).<a name="line.215"></a>
-<span class="sourceLineNo">216</span>      getRegionMetrics().get(hri.getRegionName());<a name="line.216"></a>
-<span class="sourceLineNo">217</span>    if (regionLoad == null) {<a name="line.217"></a>
-<span class="sourceLineNo">218</span>      LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");<a name="line.218"></a>
-<span class="sourceLineNo">219</span>      return -1;<a name="line.219"></a>
-<span class="sourceLineNo">220</span>    }<a name="line.220"></a>
-<span class="sourceLineNo">221</span>    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  }<a name="line.222"></a>
-<span class="sourceLineNo">223</span>}<a name="line.223"></a>
+<span class="sourceLineNo">208</span>    if (plans.isEmpty()) {<a name="line.208"></a>
+<span class="sourceLineNo">209</span>      LOG.debug("No normalization needed, regions look good for table: " + table);<a name="line.209"></a>
+<span class="sourceLineNo">210</span>      return null;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>    }<a name="line.211"></a>
+<span class="sourceLineNo">212</span>    Collections.sort(plans, planComparator);<a name="line.212"></a>
+<span class="sourceLineNo">213</span>    return plans;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  }<a name="line.214"></a>
+<span class="sourceLineNo">215</span><a name="line.215"></a>
+<span class="sourceLineNo">216</span>  private long getRegionSize(RegionInfo hri) {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>    ServerName sn = masterServices.getAssignmentManager().getRegionStates().<a name="line.217"></a>
+<span class="sourceLineNo">218</span>      getRegionServerOfRegion(hri);<a name="line.218"></a>
+<span class="sourceLineNo">219</span>    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>      getRegionMetrics().get(hri.getRegionName());<a name="line.220"></a>
+<span class="sourceLineNo">221</span>    if (regionLoad == null) {<a name="line.221"></a>
+<span class="sourceLineNo">222</span>      LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");<a name="line.222"></a>
+<span class="sourceLineNo">223</span>      return -1;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>    }<a name="line.224"></a>
+<span class="sourceLineNo">225</span>    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);<a name="line.225"></a>
+<span class="sourceLineNo">226</span>  }<a name="line.226"></a>
+<span class="sourceLineNo">227</span>}<a name="line.227"></a>
 
 
 


[15/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.debug("Failed to create lock file

<TRUNCATED>

[10/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineN

<TRUNCATED>

[06/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418"></a>
+<span class="sourceLineNo">419</span>          LOG.deb

<TRUNCATED>

[38/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 8a494b9..8f95e1e 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -441,19 +441,19 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeyValue.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeyValue.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MetaTableAccessor.QueryType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompatibilitySingletonFactory.SingletonStorage.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompatibilitySingletonFactory.SingletonStorage</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompareOperator.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompareOperator</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeepDeletedCells.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeepDeletedCells</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MemoryCompactionPolicy.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MemoryCompactionPolicy</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Cell.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Cell.Type</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MetaTableAccessor.QueryType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HConstants.OperationStatusCode.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HConstants.OperationStatusCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CellBuilderType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CellBuilderType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ClusterMetrics.Option.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ClusterMetrics.Option</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompatibilitySingletonFactory.SingletonStorage.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompatibilitySingletonFactory.SingletonStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Coprocessor.State.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Coprocessor.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeepDeletedCells.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeepDeletedCells</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Size.Unit.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Size.Unit</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CellBuilderType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CellBuilderType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HConstants.OperationStatusCode.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HConstants.OperationStatusCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Cell.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Cell.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeyValue.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeyValue.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HealthChecker.HealthCheckerExitStatus</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Coprocessor.State.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Coprocessor.State</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 79bccf0..f58a1c3 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -212,11 +212,11 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.LockState.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">Procedure.LockState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">RootProcedureState.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/StateMachineProcedure.Flow.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">StateMachineProcedure.Flow</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/LockType.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">LockType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/LockedResourceType.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">LockedResourceType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/StateMachineProcedure.Flow.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">StateMachineProcedure.Flow</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.LockState.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">Procedure.LockState</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 4b5274d..6a8ab7e 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -229,13 +229,13 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">SpaceViolationPolicy</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/ThrottleType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">ThrottleType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaScope.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">QuotaScope</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/RpcThrottlingException.Type.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">RpcThrottlingException.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/ThrottlingException.Type.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">ThrottlingException.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">SpaceViolationPolicy</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">QuotaType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/RpcThrottlingException.Type.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">RpcThrottlingException.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/OperationQuota.OperationType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">OperationQuota.OperationType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/ThrottleType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">ThrottleType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
index 5aaad1e..abd4ac3 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
@@ -226,17 +226,15 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 </td>
 </tr>
 <tr id="i2" class="altColor">
-<td class="colFirst"><code>static long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-long-java.util.List-">addToScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
+<td class="colFirst"><code>static void</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-java.util.List-long-java.util.List-">addToScanners</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
              long&nbsp;readPt,
-             long&nbsp;order,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr id="i3" class="rowColor">
-<td class="colFirst"><code>protected static long</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">addToScanners</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
+<td class="colFirst"><code>protected static void</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">addToScanners</a></span>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
              long&nbsp;readPt,
-             long&nbsp;order,
              <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</code>&nbsp;</td>
 </tr>
 <tr id="i4" class="altColor">
@@ -487,7 +485,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockListLast">
 <li class="blockList">
 <h4>AbstractMemStore</h4>
-<pre>protected&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.77">AbstractMemStore</a>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+<pre>protected&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.75">AbstractMemStore</a>(org.apache.hadoop.conf.Configuration&nbsp;conf,
                            <a href="../../../../../org/apache/hadoop/hbase/CellComparator.html" title="interface in org.apache.hadoop.hbase">CellComparator</a>&nbsp;c)</pre>
 </li>
 </ul>
@@ -499,27 +497,25 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <!--   -->
 </a>
 <h3>Method Detail</h3>
-<a name="addToScanners-java.util.List-long-long-java.util.List-">
+<a name="addToScanners-java.util.List-long-java.util.List-">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>addToScanners</h4>
-<pre>public static&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.63">addToScanners</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.63">addToScanners</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;segments,
                                  long&nbsp;readPt,
-                                 long&nbsp;order,
                                  <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</pre>
 </li>
 </ul>
-<a name="addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-long-java.util.List-">
+<a name="addToScanners-org.apache.hadoop.hbase.regionserver.Segment-long-java.util.List-">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>addToScanners</h4>
-<pre>protected static&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.71">addToScanners</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
+<pre>protected static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.70">addToScanners</a>(<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&nbsp;segment,
                                     long&nbsp;readPt,
-                                    long&nbsp;order,
                                     <a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/KeyValueScanner.html" title="interface in org.apache.hadoop.hbase.regionserver">KeyValueScanner</a>&gt;&nbsp;scanners)</pre>
 </li>
 </ul>
@@ -529,7 +525,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>resetActive</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.85">resetActive</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.83">resetActive</a>()</pre>
 </li>
 </ul>
 <a name="updateLowestUnflushedSequenceIdInWAL-boolean-">
@@ -538,7 +534,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>updateLowestUnflushedSequenceIdInWAL</h4>
-<pre>public abstract&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.96">updateLowestUnflushedSequenceIdInWAL</a>(boolean&nbsp;onlyIfMoreRecent)</pre>
+<pre>public abstract&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.94">updateLowestUnflushedSequenceIdInWAL</a>(boolean&nbsp;onlyIfMoreRecent)</pre>
 <div class="block">Updates the wal with the lowest sequence id (oldest entry) that is still in memory</div>
 <dl>
 <dt><span class="paramLabel">Parameters:</span></dt>
@@ -553,7 +549,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>add</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.99">add</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true" title="class or interface in java.lang">Iterable</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;cells,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.97">add</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true" title="class or interface in java.lang">Iterable</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;cells,
                 <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#add-java.lang.Iterable-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">MemStore</a></code></span></div>
 <div class="block">Write the updates</div>
@@ -571,7 +567,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>add</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.106">add</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.104">add</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                 <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#add-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">MemStore</a></code></span></div>
 <div class="block">Write an update</div>
@@ -589,7 +585,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>deepCopyIfNeeded</h4>
-<pre>private static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.124">deepCopyIfNeeded</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
+<pre>private static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.122">deepCopyIfNeeded</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell)</pre>
 </li>
 </ul>
 <a name="upsert-java.lang.Iterable-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">
@@ -598,7 +594,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>upsert</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.132">upsert</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true" title="class or interface in java.lang">Iterable</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;cells,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.130">upsert</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true" title="class or interface in java.lang">Iterable</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;cells,
                    long&nbsp;readpoint,
                    <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#upsert-java.lang.Iterable-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">MemStore</a></code></span></div>
@@ -627,7 +623,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>timeOfOldestEdit</h4>
-<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.142">timeOfOldestEdit</a>()</pre>
+<pre>public&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.140">timeOfOldestEdit</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#timeOfOldestEdit--">timeOfOldestEdit</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html" title="interface in org.apache.hadoop.hbase.regionserver">MemStore</a></code></dd>
@@ -642,7 +638,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>clearSnapshot</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.152">clearSnapshot</a>(long&nbsp;id)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.150">clearSnapshot</a>(long&nbsp;id)
                    throws <a href="../../../../../org/apache/hadoop/hbase/exceptions/UnexpectedStateException.html" title="class in org.apache.hadoop.hbase.exceptions">UnexpectedStateException</a></pre>
 <div class="block">The passed snapshot was successfully persisted; it can be let go.</div>
 <dl>
@@ -663,7 +659,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getSnapshotSize</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSize.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSize</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.169">getSnapshotSize</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSize.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSize</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.167">getSnapshotSize</a>()</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore.html#getSnapshotSize--">MemStore</a></code></span></div>
 <div class="block">Return the size of the snapshot(s) if any</div>
 <dl>
@@ -680,7 +676,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getSnapshotSizing</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.173">getSnapshotSizing</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.171">getSnapshotSizing</a>()</pre>
 </li>
 </ul>
 <a name="toString--">
@@ -689,7 +685,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.180">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.178">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>
@@ -702,7 +698,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getConfiguration</h4>
-<pre>protected&nbsp;org.apache.hadoop.conf.Configuration&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.194">getConfiguration</a>()</pre>
+<pre>protected&nbsp;org.apache.hadoop.conf.Configuration&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.192">getConfiguration</a>()</pre>
 </li>
 </ul>
 <a name="dump-org.slf4j.Logger-">
@@ -711,7 +707,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>dump</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.198">dump</a>(org.slf4j.Logger&nbsp;log)</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.196">dump</a>(org.slf4j.Logger&nbsp;log)</pre>
 </li>
 </ul>
 <a name="upsert-org.apache.hadoop.hbase.Cell-long-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">
@@ -720,7 +716,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>upsert</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.219">upsert</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.217">upsert</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                     long&nbsp;readpoint,
                     <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 </li>
@@ -731,7 +727,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getLowest</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.240">getLowest</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;a,
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.238">getLowest</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;a,
                          <a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;b)</pre>
 </li>
 </ul>
@@ -741,7 +737,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getNextRow</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.256">getNextRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;key,
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.254">getNextRow</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;key,
                           <a href="https://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true" title="class or interface in java.util">NavigableSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&gt;&nbsp;set)</pre>
 </li>
 </ul>
@@ -751,7 +747,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>maybeCloneWithAllocator</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.286">maybeCloneWithAllocator</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.284">maybeCloneWithAllocator</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;cell,
                                      boolean&nbsp;forceCloneOfBigCell)</pre>
 <div class="block">If the segment has a memory allocator the cell is being cloned to this space, and returned;
  Otherwise the given cell is returned
@@ -775,7 +771,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>internalAdd</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.299">internalAdd</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;toAdd,
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.297">internalAdd</a>(<a href="../../../../../org/apache/hadoop/hbase/Cell.html" title="interface in org.apache.hadoop.hbase">Cell</a>&nbsp;toAdd,
                          boolean&nbsp;mslabUsed,
                          <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreSizing.html" title="class in org.apache.hadoop.hbase.regionserver">MemStoreSizing</a>&nbsp;memstoreSizing)</pre>
 </li>
@@ -786,7 +782,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>setOldestEditTimeToNow</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.305">setOldestEditTimeToNow</a>()</pre>
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.303">setOldestEditTimeToNow</a>()</pre>
 </li>
 </ul>
 <a name="keySize--">
@@ -795,7 +791,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>keySize</h4>
-<pre>protected abstract&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.314">keySize</a>()</pre>
+<pre>protected abstract&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.312">keySize</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>The total size of cells in this memstore. We will not consider cells in the snapshot</dd>
@@ -808,7 +804,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>heapSize</h4>
-<pre>protected abstract&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.320">heapSize</a>()</pre>
+<pre>protected abstract&nbsp;long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.318">heapSize</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>The total heap size of cells in this memstore. We will not consider cells in the
@@ -822,7 +818,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getComparator</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/CellComparator.html" title="interface in org.apache.hadoop.hbase">CellComparator</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.322">getComparator</a>()</pre>
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/CellComparator.html" title="interface in org.apache.hadoop.hbase">CellComparator</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.320">getComparator</a>()</pre>
 </li>
 </ul>
 <a name="getActive--">
@@ -831,7 +827,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getActive</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/MutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">MutableSegment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.327">getActive</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/MutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">MutableSegment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.325">getActive</a>()</pre>
 </li>
 </ul>
 <a name="getSnapshot--">
@@ -840,7 +836,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>getSnapshot</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">ImmutableSegment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.332">getSnapshot</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSegment.html" title="class in org.apache.hadoop.hbase.regionserver">ImmutableSegment</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.330">getSnapshot</a>()</pre>
 </li>
 </ul>
 <a name="checkActiveSize--">
@@ -849,7 +845,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockList">
 <li class="blockList">
 <h4>checkActiveSize</h4>
-<pre>protected abstract&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.339">checkActiveSize</a>()</pre>
+<pre>protected abstract&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.337">checkActiveSize</a>()</pre>
 <div class="block">Check whether anything need to be done based on the current active set size</div>
 </li>
 </ul>
@@ -859,7 +855,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStore
 <ul class="blockListLast">
 <li class="blockList">
 <h4>getSegments</h4>
-<pre>protected abstract&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.344">getSegments</a>()
+<pre>protected abstract&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html#line.342">getSegments</a>()
                                       throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.html
index cd95a31..53c5b52 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.html
@@ -219,7 +219,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache
 .hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apa
 che/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.h
 base.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--
 ">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/reg
 ionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/a
 pache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
index 4471109..7334403 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.html
@@ -248,7 +248,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache
 .hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apa
 che/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.h
 base.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--
 ">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOffHeapSize-boolean-">indexEntryOffHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#indexEntryOnHeapSize-boolean-">indexEntryOnHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/reg
 ionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/a
 pache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
index db961c0..8d779cc 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
@@ -287,7 +287,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/ImmutableSe
 <!--   -->
 </a>
 <h3>Methods inherited from class&nbsp;org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html" title="class in org.apache.hadoop.hbase.regionserver">Segment</a></h3>
-<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache
 .hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">i
 terator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/ap
 ache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
+<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#close--">close</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compare-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compare</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#compareRows-org.apache.hadoop.hbase.Cell-org.apache.hadoop.hbase.Cell-">compareRows</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#decScannerCount--">decScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#dump-org.slf4j.Logger-">dump</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellLength-org.apache.hadoop.hbase.Cell-">getCellLength</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellsCount--">getCellsCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getCellSet--">getCellSet</a>, <a hr
 ef="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getComparator--">getComparator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreLAB--">getMemStoreLAB</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMemStoreSize--">getMemStoreSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getMinSequenceId--">getMinSequenceId</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanner-long-">getScanner</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getScanners-long-">getScanners</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#getTimeRangeTracker--">getTimeRangeTracker</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#headSet-org.apache.hadoop.hbase.Cell-">headSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSize--
 ">heapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#heapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">heapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incScannerCount--">incScannerCount</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#incSize-long-long-long-">incSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#internalAdd-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">internalAdd</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isEmpty--">isEmpty</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#isTagsPresent--">isTagsPresent</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#iterator--">iterator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#keySize--">keySize</a>, <a href
 ="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#last--">last</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#maybeCloneWithAllocator-org.apache.hadoop.hbase.Cell-boolean-">maybeCloneWithAllocator</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSize--">offHeapSize</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#offHeapSizeChange-org.apache.hadoop.hbase.Cell-boolean-">offHeapSizeChange</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#setCellSet-org.apache.hadoop.hbase.regionserver.CellSet-org.apache.hadoop.hbase.regionserver.CellSet-">setCellSet</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#shouldSeek-org.apache.hadoop.hbase.io.TimeRange-long-">shouldSeek</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#tailSet-org.apache.hadoop.hbase.Cell-">tailSet</a>, <a href="../../../../.
 ./org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a>, <a href="../../../../../org/apache/hadoop/hbase/regionserver/Segment.html#updateMetaInfo-org.apache.hadoop.hbase.Cell-boolean-org.apache.hadoop.hbase.regionserver.MemStoreSizing-">updateMetaInfo</a></code></li>
 </ul>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
index c453cfd..aaab577 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.534">CompactingMemStore.InMemoryFlushRunnable</a>
+<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html#line.530">CompactingMemStore.InMemoryFlushRunnable</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true" title="class or interface in java.lang">Runnable</a></pre>
 <div class="block">The in-memory-flusher thread performs the flush asynchronously.
@@ -193,7 +193,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable
 <ul class="blockListLast">
 <li class="blockList">
 <h4>InMemoryFlushRunnable</h4>
-<pre>private&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html#line.534">InMemoryFlushRunnable</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html#line.530">InMemoryFlushRunnable</a>()</pre>
 </li>
 </ul>
 </li>
@@ -210,7 +210,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable
 <ul class="blockListLast">
 <li class="blockList">
 <h4>run</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html#line.537">run</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html#line.533">run</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--" title="class or interface in java.lang">run</a></code>&nbsp;in interface&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true" title="class or interface in java.lang">Runnable</a></code></dd>


[23/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
index 95b4c30..4d2c914 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
@@ -126,311 +126,303 @@
 <span class="sourceLineNo">118</span>    return new SegmentScanner(this, readPoint);<a name="line.118"></a>
 <span class="sourceLineNo">119</span>  }<a name="line.119"></a>
 <span class="sourceLineNo">120</span><a name="line.120"></a>
-<span class="sourceLineNo">121</span>  /**<a name="line.121"></a>
-<span class="sourceLineNo">122</span>   * Creates the scanner for the given read point, and a specific order in a list<a name="line.122"></a>
-<span class="sourceLineNo">123</span>   * @return a scanner for the given read point<a name="line.123"></a>
-<span class="sourceLineNo">124</span>   */<a name="line.124"></a>
-<span class="sourceLineNo">125</span>  public KeyValueScanner getScanner(long readPoint, long order) {<a name="line.125"></a>
-<span class="sourceLineNo">126</span>    return new SegmentScanner(this, readPoint, order);<a name="line.126"></a>
-<span class="sourceLineNo">127</span>  }<a name="line.127"></a>
-<span class="sourceLineNo">128</span><a name="line.128"></a>
-<span class="sourceLineNo">129</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPoint, long order) {<a name="line.129"></a>
-<span class="sourceLineNo">130</span>    return Collections.singletonList(new SegmentScanner(this, readPoint, order));<a name="line.130"></a>
-<span class="sourceLineNo">131</span>  }<a name="line.131"></a>
-<span class="sourceLineNo">132</span><a name="line.132"></a>
-<span class="sourceLineNo">133</span>  /**<a name="line.133"></a>
-<span class="sourceLineNo">134</span>   * @return whether the segment has any cells<a name="line.134"></a>
-<span class="sourceLineNo">135</span>   */<a name="line.135"></a>
-<span class="sourceLineNo">136</span>  public boolean isEmpty() {<a name="line.136"></a>
-<span class="sourceLineNo">137</span>    return getCellSet().isEmpty();<a name="line.137"></a>
-<span class="sourceLineNo">138</span>  }<a name="line.138"></a>
-<span class="sourceLineNo">139</span><a name="line.139"></a>
-<span class="sourceLineNo">140</span>  /**<a name="line.140"></a>
-<span class="sourceLineNo">141</span>   * @return number of cells in segment<a name="line.141"></a>
-<span class="sourceLineNo">142</span>   */<a name="line.142"></a>
-<span class="sourceLineNo">143</span>  public int getCellsCount() {<a name="line.143"></a>
-<span class="sourceLineNo">144</span>    return getCellSet().size();<a name="line.144"></a>
-<span class="sourceLineNo">145</span>  }<a name="line.145"></a>
-<span class="sourceLineNo">146</span><a name="line.146"></a>
-<span class="sourceLineNo">147</span>  /**<a name="line.147"></a>
-<span class="sourceLineNo">148</span>   * Closing a segment before it is being discarded<a name="line.148"></a>
-<span class="sourceLineNo">149</span>   */<a name="line.149"></a>
-<span class="sourceLineNo">150</span>  public void close() {<a name="line.150"></a>
-<span class="sourceLineNo">151</span>    if (this.memStoreLAB != null) {<a name="line.151"></a>
-<span class="sourceLineNo">152</span>      this.memStoreLAB.close();<a name="line.152"></a>
-<span class="sourceLineNo">153</span>    }<a name="line.153"></a>
-<span class="sourceLineNo">154</span>    // do not set MSLab to null as scanners may still be reading the data here and need to decrease<a name="line.154"></a>
-<span class="sourceLineNo">155</span>    // the counter when they finish<a name="line.155"></a>
-<span class="sourceLineNo">156</span>  }<a name="line.156"></a>
-<span class="sourceLineNo">157</span><a name="line.157"></a>
-<span class="sourceLineNo">158</span>  /**<a name="line.158"></a>
-<span class="sourceLineNo">159</span>   * If the segment has a memory allocator the cell is being cloned to this space, and returned;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>   * otherwise the given cell is returned<a name="line.160"></a>
-<span class="sourceLineNo">161</span>   *<a name="line.161"></a>
-<span class="sourceLineNo">162</span>   * When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.<a name="line.162"></a>
-<span class="sourceLineNo">163</span>   * Since the process of flattening to CellChunkMap assumes that all cells<a name="line.163"></a>
-<span class="sourceLineNo">164</span>   * are allocated on MSLAB, during this process, the input parameter<a name="line.164"></a>
-<span class="sourceLineNo">165</span>   * forceCloneOfBigCell is set to 'true' and the cell is copied into MSLAB.<a name="line.165"></a>
-<span class="sourceLineNo">166</span>   *<a name="line.166"></a>
-<span class="sourceLineNo">167</span>   * @return either the given cell or its clone<a name="line.167"></a>
-<span class="sourceLineNo">168</span>   */<a name="line.168"></a>
-<span class="sourceLineNo">169</span>  public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {<a name="line.169"></a>
-<span class="sourceLineNo">170</span>    if (this.memStoreLAB == null) {<a name="line.170"></a>
-<span class="sourceLineNo">171</span>      return cell;<a name="line.171"></a>
-<span class="sourceLineNo">172</span>    }<a name="line.172"></a>
-<span class="sourceLineNo">173</span><a name="line.173"></a>
-<span class="sourceLineNo">174</span>    Cell cellFromMslab = null;<a name="line.174"></a>
-<span class="sourceLineNo">175</span>    if (forceCloneOfBigCell) {<a name="line.175"></a>
-<span class="sourceLineNo">176</span>      cellFromMslab = this.memStoreLAB.forceCopyOfBigCellInto(cell);<a name="line.176"></a>
-<span class="sourceLineNo">177</span>    } else {<a name="line.177"></a>
-<span class="sourceLineNo">178</span>      cellFromMslab = this.memStoreLAB.copyCellInto(cell);<a name="line.178"></a>
-<span class="sourceLineNo">179</span>    }<a name="line.179"></a>
-<span class="sourceLineNo">180</span>    return (cellFromMslab != null) ? cellFromMslab : cell;<a name="line.180"></a>
+<span class="sourceLineNo">121</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPoint) {<a name="line.121"></a>
+<span class="sourceLineNo">122</span>    return Collections.singletonList(new SegmentScanner(this, readPoint));<a name="line.122"></a>
+<span class="sourceLineNo">123</span>  }<a name="line.123"></a>
+<span class="sourceLineNo">124</span><a name="line.124"></a>
+<span class="sourceLineNo">125</span>  /**<a name="line.125"></a>
+<span class="sourceLineNo">126</span>   * @return whether the segment has any cells<a name="line.126"></a>
+<span class="sourceLineNo">127</span>   */<a name="line.127"></a>
+<span class="sourceLineNo">128</span>  public boolean isEmpty() {<a name="line.128"></a>
+<span class="sourceLineNo">129</span>    return getCellSet().isEmpty();<a name="line.129"></a>
+<span class="sourceLineNo">130</span>  }<a name="line.130"></a>
+<span class="sourceLineNo">131</span><a name="line.131"></a>
+<span class="sourceLineNo">132</span>  /**<a name="line.132"></a>
+<span class="sourceLineNo">133</span>   * @return number of cells in segment<a name="line.133"></a>
+<span class="sourceLineNo">134</span>   */<a name="line.134"></a>
+<span class="sourceLineNo">135</span>  public int getCellsCount() {<a name="line.135"></a>
+<span class="sourceLineNo">136</span>    return getCellSet().size();<a name="line.136"></a>
+<span class="sourceLineNo">137</span>  }<a name="line.137"></a>
+<span class="sourceLineNo">138</span><a name="line.138"></a>
+<span class="sourceLineNo">139</span>  /**<a name="line.139"></a>
+<span class="sourceLineNo">140</span>   * Closing a segment before it is being discarded<a name="line.140"></a>
+<span class="sourceLineNo">141</span>   */<a name="line.141"></a>
+<span class="sourceLineNo">142</span>  public void close() {<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    if (this.memStoreLAB != null) {<a name="line.143"></a>
+<span class="sourceLineNo">144</span>      this.memStoreLAB.close();<a name="line.144"></a>
+<span class="sourceLineNo">145</span>    }<a name="line.145"></a>
+<span class="sourceLineNo">146</span>    // do not set MSLab to null as scanners may still be reading the data here and need to decrease<a name="line.146"></a>
+<span class="sourceLineNo">147</span>    // the counter when they finish<a name="line.147"></a>
+<span class="sourceLineNo">148</span>  }<a name="line.148"></a>
+<span class="sourceLineNo">149</span><a name="line.149"></a>
+<span class="sourceLineNo">150</span>  /**<a name="line.150"></a>
+<span class="sourceLineNo">151</span>   * If the segment has a memory allocator the cell is being cloned to this space, and returned;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>   * otherwise the given cell is returned<a name="line.152"></a>
+<span class="sourceLineNo">153</span>   *<a name="line.153"></a>
+<span class="sourceLineNo">154</span>   * When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.<a name="line.154"></a>
+<span class="sourceLineNo">155</span>   * Since the process of flattening to CellChunkMap assumes that all cells<a name="line.155"></a>
+<span class="sourceLineNo">156</span>   * are allocated on MSLAB, during this process, the input parameter<a name="line.156"></a>
+<span class="sourceLineNo">157</span>   * forceCloneOfBigCell is set to 'true' and the cell is copied into MSLAB.<a name="line.157"></a>
+<span class="sourceLineNo">158</span>   *<a name="line.158"></a>
+<span class="sourceLineNo">159</span>   * @return either the given cell or its clone<a name="line.159"></a>
+<span class="sourceLineNo">160</span>   */<a name="line.160"></a>
+<span class="sourceLineNo">161</span>  public Cell maybeCloneWithAllocator(Cell cell, boolean forceCloneOfBigCell) {<a name="line.161"></a>
+<span class="sourceLineNo">162</span>    if (this.memStoreLAB == null) {<a name="line.162"></a>
+<span class="sourceLineNo">163</span>      return cell;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>    }<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>    Cell cellFromMslab = null;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>    if (forceCloneOfBigCell) {<a name="line.167"></a>
+<span class="sourceLineNo">168</span>      cellFromMslab = this.memStoreLAB.forceCopyOfBigCellInto(cell);<a name="line.168"></a>
+<span class="sourceLineNo">169</span>    } else {<a name="line.169"></a>
+<span class="sourceLineNo">170</span>      cellFromMslab = this.memStoreLAB.copyCellInto(cell);<a name="line.170"></a>
+<span class="sourceLineNo">171</span>    }<a name="line.171"></a>
+<span class="sourceLineNo">172</span>    return (cellFromMslab != null) ? cellFromMslab : cell;<a name="line.172"></a>
+<span class="sourceLineNo">173</span>  }<a name="line.173"></a>
+<span class="sourceLineNo">174</span><a name="line.174"></a>
+<span class="sourceLineNo">175</span>  /**<a name="line.175"></a>
+<span class="sourceLineNo">176</span>   * Get cell length after serialized in {@link KeyValue}<a name="line.176"></a>
+<span class="sourceLineNo">177</span>   */<a name="line.177"></a>
+<span class="sourceLineNo">178</span>  @VisibleForTesting<a name="line.178"></a>
+<span class="sourceLineNo">179</span>  static int getCellLength(Cell cell) {<a name="line.179"></a>
+<span class="sourceLineNo">180</span>    return KeyValueUtil.length(cell);<a name="line.180"></a>
 <span class="sourceLineNo">181</span>  }<a name="line.181"></a>
 <span class="sourceLineNo">182</span><a name="line.182"></a>
-<span class="sourceLineNo">183</span>  /**<a name="line.183"></a>
-<span class="sourceLineNo">184</span>   * Get cell length after serialized in {@link KeyValue}<a name="line.184"></a>
-<span class="sourceLineNo">185</span>   */<a name="line.185"></a>
-<span class="sourceLineNo">186</span>  @VisibleForTesting<a name="line.186"></a>
-<span class="sourceLineNo">187</span>  static int getCellLength(Cell cell) {<a name="line.187"></a>
-<span class="sourceLineNo">188</span>    return KeyValueUtil.length(cell);<a name="line.188"></a>
-<span class="sourceLineNo">189</span>  }<a name="line.189"></a>
-<span class="sourceLineNo">190</span><a name="line.190"></a>
-<span class="sourceLineNo">191</span>  public boolean shouldSeek(TimeRange tr, long oldestUnexpiredTS) {<a name="line.191"></a>
-<span class="sourceLineNo">192</span>    return !isEmpty()<a name="line.192"></a>
-<span class="sourceLineNo">193</span>        &amp;&amp; (tr.isAllTime() || timeRangeTracker.includesTimeRange(tr))<a name="line.193"></a>
-<span class="sourceLineNo">194</span>        &amp;&amp; timeRangeTracker.getMax() &gt;= oldestUnexpiredTS;<a name="line.194"></a>
-<span class="sourceLineNo">195</span>  }<a name="line.195"></a>
-<span class="sourceLineNo">196</span><a name="line.196"></a>
-<span class="sourceLineNo">197</span>  public boolean isTagsPresent() {<a name="line.197"></a>
-<span class="sourceLineNo">198</span>    return tagsPresent;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>  }<a name="line.199"></a>
-<span class="sourceLineNo">200</span><a name="line.200"></a>
-<span class="sourceLineNo">201</span>  public void incScannerCount() {<a name="line.201"></a>
-<span class="sourceLineNo">202</span>    if (this.memStoreLAB != null) {<a name="line.202"></a>
-<span class="sourceLineNo">203</span>      this.memStoreLAB.incScannerCount();<a name="line.203"></a>
-<span class="sourceLineNo">204</span>    }<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  }<a name="line.205"></a>
-<span class="sourceLineNo">206</span><a name="line.206"></a>
-<span class="sourceLineNo">207</span>  public void decScannerCount() {<a name="line.207"></a>
-<span class="sourceLineNo">208</span>    if (this.memStoreLAB != null) {<a name="line.208"></a>
-<span class="sourceLineNo">209</span>      this.memStoreLAB.decScannerCount();<a name="line.209"></a>
-<span class="sourceLineNo">210</span>    }<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  }<a name="line.211"></a>
-<span class="sourceLineNo">212</span><a name="line.212"></a>
-<span class="sourceLineNo">213</span>  /**<a name="line.213"></a>
-<span class="sourceLineNo">214</span>   * Setting the CellSet of the segment - used only for flat immutable segment for setting<a name="line.214"></a>
-<span class="sourceLineNo">215</span>   * immutable CellSet after its creation in immutable segment constructor<a name="line.215"></a>
-<span class="sourceLineNo">216</span>   * @return this object<a name="line.216"></a>
-<span class="sourceLineNo">217</span>   */<a name="line.217"></a>
-<span class="sourceLineNo">218</span><a name="line.218"></a>
-<span class="sourceLineNo">219</span>  protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {<a name="line.219"></a>
-<span class="sourceLineNo">220</span>    this.cellSet.compareAndSet(cellSetOld, cellSetNew);<a name="line.220"></a>
-<span class="sourceLineNo">221</span>    return this;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  }<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  public MemStoreSize getMemStoreSize() {<a name="line.224"></a>
-<span class="sourceLineNo">225</span>    return this.segmentSize;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  }<a name="line.226"></a>
-<span class="sourceLineNo">227</span><a name="line.227"></a>
-<span class="sourceLineNo">228</span>  /**<a name="line.228"></a>
-<span class="sourceLineNo">229</span>   * @return Sum of all cell's size.<a name="line.229"></a>
-<span class="sourceLineNo">230</span>   */<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  public long keySize() {<a name="line.231"></a>
-<span class="sourceLineNo">232</span>    return this.segmentSize.getDataSize();<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  }<a name="line.233"></a>
-<span class="sourceLineNo">234</span><a name="line.234"></a>
-<span class="sourceLineNo">235</span>  /**<a name="line.235"></a>
-<span class="sourceLineNo">236</span>   * @return The heap size of this segment.<a name="line.236"></a>
-<span class="sourceLineNo">237</span>   */<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  public long heapSize() {<a name="line.238"></a>
-<span class="sourceLineNo">239</span>    return this.segmentSize.getHeapSize();<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  }<a name="line.240"></a>
-<span class="sourceLineNo">241</span><a name="line.241"></a>
-<span class="sourceLineNo">242</span>  /**<a name="line.242"></a>
-<span class="sourceLineNo">243</span>   * @return The off-heap size of this segment.<a name="line.243"></a>
-<span class="sourceLineNo">244</span>   */<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  public long offHeapSize() {<a name="line.245"></a>
-<span class="sourceLineNo">246</span>    return this.segmentSize.getOffHeapSize();<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  }<a name="line.247"></a>
-<span class="sourceLineNo">248</span><a name="line.248"></a>
-<span class="sourceLineNo">249</span>  /**<a name="line.249"></a>
-<span class="sourceLineNo">250</span>   * Updates the size counters of the segment by the given delta<a name="line.250"></a>
-<span class="sourceLineNo">251</span>   */<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  //TODO<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  protected void incSize(long delta, long heapOverhead, long offHeapOverhead) {<a name="line.253"></a>
-<span class="sourceLineNo">254</span>    synchronized (this) {<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      this.segmentSize.incMemStoreSize(delta, heapOverhead, offHeapOverhead);<a name="line.255"></a>
-<span class="sourceLineNo">256</span>    }<a name="line.256"></a>
+<span class="sourceLineNo">183</span>  public boolean shouldSeek(TimeRange tr, long oldestUnexpiredTS) {<a name="line.183"></a>
+<span class="sourceLineNo">184</span>    return !isEmpty()<a name="line.184"></a>
+<span class="sourceLineNo">185</span>        &amp;&amp; (tr.isAllTime() || timeRangeTracker.includesTimeRange(tr))<a name="line.185"></a>
+<span class="sourceLineNo">186</span>        &amp;&amp; timeRangeTracker.getMax() &gt;= oldestUnexpiredTS;<a name="line.186"></a>
+<span class="sourceLineNo">187</span>  }<a name="line.187"></a>
+<span class="sourceLineNo">188</span><a name="line.188"></a>
+<span class="sourceLineNo">189</span>  public boolean isTagsPresent() {<a name="line.189"></a>
+<span class="sourceLineNo">190</span>    return tagsPresent;<a name="line.190"></a>
+<span class="sourceLineNo">191</span>  }<a name="line.191"></a>
+<span class="sourceLineNo">192</span><a name="line.192"></a>
+<span class="sourceLineNo">193</span>  public void incScannerCount() {<a name="line.193"></a>
+<span class="sourceLineNo">194</span>    if (this.memStoreLAB != null) {<a name="line.194"></a>
+<span class="sourceLineNo">195</span>      this.memStoreLAB.incScannerCount();<a name="line.195"></a>
+<span class="sourceLineNo">196</span>    }<a name="line.196"></a>
+<span class="sourceLineNo">197</span>  }<a name="line.197"></a>
+<span class="sourceLineNo">198</span><a name="line.198"></a>
+<span class="sourceLineNo">199</span>  public void decScannerCount() {<a name="line.199"></a>
+<span class="sourceLineNo">200</span>    if (this.memStoreLAB != null) {<a name="line.200"></a>
+<span class="sourceLineNo">201</span>      this.memStoreLAB.decScannerCount();<a name="line.201"></a>
+<span class="sourceLineNo">202</span>    }<a name="line.202"></a>
+<span class="sourceLineNo">203</span>  }<a name="line.203"></a>
+<span class="sourceLineNo">204</span><a name="line.204"></a>
+<span class="sourceLineNo">205</span>  /**<a name="line.205"></a>
+<span class="sourceLineNo">206</span>   * Setting the CellSet of the segment - used only for flat immutable segment for setting<a name="line.206"></a>
+<span class="sourceLineNo">207</span>   * immutable CellSet after its creation in immutable segment constructor<a name="line.207"></a>
+<span class="sourceLineNo">208</span>   * @return this object<a name="line.208"></a>
+<span class="sourceLineNo">209</span>   */<a name="line.209"></a>
+<span class="sourceLineNo">210</span><a name="line.210"></a>
+<span class="sourceLineNo">211</span>  protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {<a name="line.211"></a>
+<span class="sourceLineNo">212</span>    this.cellSet.compareAndSet(cellSetOld, cellSetNew);<a name="line.212"></a>
+<span class="sourceLineNo">213</span>    return this;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  }<a name="line.214"></a>
+<span class="sourceLineNo">215</span><a name="line.215"></a>
+<span class="sourceLineNo">216</span>  public MemStoreSize getMemStoreSize() {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>    return this.segmentSize;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  }<a name="line.218"></a>
+<span class="sourceLineNo">219</span><a name="line.219"></a>
+<span class="sourceLineNo">220</span>  /**<a name="line.220"></a>
+<span class="sourceLineNo">221</span>   * @return Sum of all cell's size.<a name="line.221"></a>
+<span class="sourceLineNo">222</span>   */<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  public long keySize() {<a name="line.223"></a>
+<span class="sourceLineNo">224</span>    return this.segmentSize.getDataSize();<a name="line.224"></a>
+<span class="sourceLineNo">225</span>  }<a name="line.225"></a>
+<span class="sourceLineNo">226</span><a name="line.226"></a>
+<span class="sourceLineNo">227</span>  /**<a name="line.227"></a>
+<span class="sourceLineNo">228</span>   * @return The heap size of this segment.<a name="line.228"></a>
+<span class="sourceLineNo">229</span>   */<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  public long heapSize() {<a name="line.230"></a>
+<span class="sourceLineNo">231</span>    return this.segmentSize.getHeapSize();<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  }<a name="line.232"></a>
+<span class="sourceLineNo">233</span><a name="line.233"></a>
+<span class="sourceLineNo">234</span>  /**<a name="line.234"></a>
+<span class="sourceLineNo">235</span>   * @return The off-heap size of this segment.<a name="line.235"></a>
+<span class="sourceLineNo">236</span>   */<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  public long offHeapSize() {<a name="line.237"></a>
+<span class="sourceLineNo">238</span>    return this.segmentSize.getOffHeapSize();<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  }<a name="line.239"></a>
+<span class="sourceLineNo">240</span><a name="line.240"></a>
+<span class="sourceLineNo">241</span>  /**<a name="line.241"></a>
+<span class="sourceLineNo">242</span>   * Updates the size counters of the segment by the given delta<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   */<a name="line.243"></a>
+<span class="sourceLineNo">244</span>  //TODO<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  protected void incSize(long delta, long heapOverhead, long offHeapOverhead) {<a name="line.245"></a>
+<span class="sourceLineNo">246</span>    synchronized (this) {<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      this.segmentSize.incMemStoreSize(delta, heapOverhead, offHeapOverhead);<a name="line.247"></a>
+<span class="sourceLineNo">248</span>    }<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  }<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  public long getMinSequenceId() {<a name="line.251"></a>
+<span class="sourceLineNo">252</span>    return minSequenceId;<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  }<a name="line.253"></a>
+<span class="sourceLineNo">254</span><a name="line.254"></a>
+<span class="sourceLineNo">255</span>  public TimeRangeTracker getTimeRangeTracker() {<a name="line.255"></a>
+<span class="sourceLineNo">256</span>    return this.timeRangeTracker;<a name="line.256"></a>
 <span class="sourceLineNo">257</span>  }<a name="line.257"></a>
 <span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  public long getMinSequenceId() {<a name="line.259"></a>
-<span class="sourceLineNo">260</span>    return minSequenceId;<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  }<a name="line.261"></a>
-<span class="sourceLineNo">262</span><a name="line.262"></a>
-<span class="sourceLineNo">263</span>  public TimeRangeTracker getTimeRangeTracker() {<a name="line.263"></a>
-<span class="sourceLineNo">264</span>    return this.timeRangeTracker;<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  }<a name="line.265"></a>
-<span class="sourceLineNo">266</span><a name="line.266"></a>
-<span class="sourceLineNo">267</span>  //*** Methods for SegmentsScanner<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  public Cell last() {<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    return getCellSet().last();<a name="line.269"></a>
+<span class="sourceLineNo">259</span>  //*** Methods for SegmentsScanner<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  public Cell last() {<a name="line.260"></a>
+<span class="sourceLineNo">261</span>    return getCellSet().last();<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  }<a name="line.262"></a>
+<span class="sourceLineNo">263</span><a name="line.263"></a>
+<span class="sourceLineNo">264</span>  public Iterator&lt;Cell&gt; iterator() {<a name="line.264"></a>
+<span class="sourceLineNo">265</span>    return getCellSet().iterator();<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  }<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  public SortedSet&lt;Cell&gt; headSet(Cell firstKeyOnRow) {<a name="line.268"></a>
+<span class="sourceLineNo">269</span>    return getCellSet().headSet(firstKeyOnRow);<a name="line.269"></a>
 <span class="sourceLineNo">270</span>  }<a name="line.270"></a>
 <span class="sourceLineNo">271</span><a name="line.271"></a>
-<span class="sourceLineNo">272</span>  public Iterator&lt;Cell&gt; iterator() {<a name="line.272"></a>
-<span class="sourceLineNo">273</span>    return getCellSet().iterator();<a name="line.273"></a>
+<span class="sourceLineNo">272</span>  public int compare(Cell left, Cell right) {<a name="line.272"></a>
+<span class="sourceLineNo">273</span>    return getComparator().compare(left, right);<a name="line.273"></a>
 <span class="sourceLineNo">274</span>  }<a name="line.274"></a>
 <span class="sourceLineNo">275</span><a name="line.275"></a>
-<span class="sourceLineNo">276</span>  public SortedSet&lt;Cell&gt; headSet(Cell firstKeyOnRow) {<a name="line.276"></a>
-<span class="sourceLineNo">277</span>    return getCellSet().headSet(firstKeyOnRow);<a name="line.277"></a>
+<span class="sourceLineNo">276</span>  public int compareRows(Cell left, Cell right) {<a name="line.276"></a>
+<span class="sourceLineNo">277</span>    return getComparator().compareRows(left, right);<a name="line.277"></a>
 <span class="sourceLineNo">278</span>  }<a name="line.278"></a>
 <span class="sourceLineNo">279</span><a name="line.279"></a>
-<span class="sourceLineNo">280</span>  public int compare(Cell left, Cell right) {<a name="line.280"></a>
-<span class="sourceLineNo">281</span>    return getComparator().compare(left, right);<a name="line.281"></a>
-<span class="sourceLineNo">282</span>  }<a name="line.282"></a>
-<span class="sourceLineNo">283</span><a name="line.283"></a>
-<span class="sourceLineNo">284</span>  public int compareRows(Cell left, Cell right) {<a name="line.284"></a>
-<span class="sourceLineNo">285</span>    return getComparator().compareRows(left, right);<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  }<a name="line.286"></a>
-<span class="sourceLineNo">287</span><a name="line.287"></a>
-<span class="sourceLineNo">288</span>  /**<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * @return a set of all cells in the segment<a name="line.289"></a>
+<span class="sourceLineNo">280</span>  /**<a name="line.280"></a>
+<span class="sourceLineNo">281</span>   * @return a set of all cells in the segment<a name="line.281"></a>
+<span class="sourceLineNo">282</span>   */<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  protected CellSet getCellSet() {<a name="line.283"></a>
+<span class="sourceLineNo">284</span>    return cellSet.get();<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  }<a name="line.285"></a>
+<span class="sourceLineNo">286</span><a name="line.286"></a>
+<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
+<span class="sourceLineNo">288</span>   * Returns the Cell comparator used by this segment<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * @return the Cell comparator used by this segment<a name="line.289"></a>
 <span class="sourceLineNo">290</span>   */<a name="line.290"></a>
-<span class="sourceLineNo">291</span>  protected CellSet getCellSet() {<a name="line.291"></a>
-<span class="sourceLineNo">292</span>    return cellSet.get();<a name="line.292"></a>
+<span class="sourceLineNo">291</span>  protected CellComparator getComparator() {<a name="line.291"></a>
+<span class="sourceLineNo">292</span>    return comparator;<a name="line.292"></a>
 <span class="sourceLineNo">293</span>  }<a name="line.293"></a>
 <span class="sourceLineNo">294</span><a name="line.294"></a>
-<span class="sourceLineNo">295</span>  /**<a name="line.295"></a>
-<span class="sourceLineNo">296</span>   * Returns the Cell comparator used by this segment<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * @return the Cell comparator used by this segment<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
-<span class="sourceLineNo">299</span>  protected CellComparator getComparator() {<a name="line.299"></a>
-<span class="sourceLineNo">300</span>    return comparator;<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  }<a name="line.301"></a>
-<span class="sourceLineNo">302</span><a name="line.302"></a>
-<span class="sourceLineNo">303</span>  protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing) {<a name="line.303"></a>
-<span class="sourceLineNo">304</span>    boolean succ = getCellSet().add(cell);<a name="line.304"></a>
-<span class="sourceLineNo">305</span>    updateMetaInfo(cell, succ, mslabUsed, memstoreSizing);<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  }<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,<a name="line.308"></a>
-<span class="sourceLineNo">309</span>      MemStoreSizing memstoreSizing) {<a name="line.309"></a>
-<span class="sourceLineNo">310</span>    long cellSize = 0;<a name="line.310"></a>
-<span class="sourceLineNo">311</span>    // If there's already a same cell in the CellSet and we are using MSLAB, we must count in the<a name="line.311"></a>
-<span class="sourceLineNo">312</span>    // MSLAB allocation size as well, or else there will be memory leak (occupied heap size larger<a name="line.312"></a>
-<span class="sourceLineNo">313</span>    // than the counted number)<a name="line.313"></a>
-<span class="sourceLineNo">314</span>    if (succ || mslabUsed) {<a name="line.314"></a>
-<span class="sourceLineNo">315</span>      cellSize = getCellLength(cellToAdd);<a name="line.315"></a>
-<span class="sourceLineNo">316</span>    }<a name="line.316"></a>
-<span class="sourceLineNo">317</span>    long heapSize = heapSizeChange(cellToAdd, succ);<a name="line.317"></a>
-<span class="sourceLineNo">318</span>    long offHeapSize = offHeapSizeChange(cellToAdd, succ);<a name="line.318"></a>
-<span class="sourceLineNo">319</span>    incSize(cellSize, heapSize, offHeapSize);<a name="line.319"></a>
-<span class="sourceLineNo">320</span>    if (memstoreSizing != null) {<a name="line.320"></a>
-<span class="sourceLineNo">321</span>      memstoreSizing.incMemStoreSize(cellSize, heapSize, offHeapSize);<a name="line.321"></a>
-<span class="sourceLineNo">322</span>    }<a name="line.322"></a>
-<span class="sourceLineNo">323</span>    getTimeRangeTracker().includeTimestamp(cellToAdd);<a name="line.323"></a>
-<span class="sourceLineNo">324</span>    minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId());<a name="line.324"></a>
-<span class="sourceLineNo">325</span>    // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call.<a name="line.325"></a>
-<span class="sourceLineNo">326</span>    // When we use ACL CP or Visibility CP which deals with Tags during<a name="line.326"></a>
-<span class="sourceLineNo">327</span>    // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not<a name="line.327"></a>
-<span class="sourceLineNo">328</span>    // parse the byte[] to identify the tags length.<a name="line.328"></a>
-<span class="sourceLineNo">329</span>    if (cellToAdd.getTagsLength() &gt; 0) {<a name="line.329"></a>
-<span class="sourceLineNo">330</span>      tagsPresent = true;<a name="line.330"></a>
-<span class="sourceLineNo">331</span>    }<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  }<a name="line.332"></a>
-<span class="sourceLineNo">333</span><a name="line.333"></a>
-<span class="sourceLineNo">334</span>  protected void updateMetaInfo(Cell cellToAdd, boolean succ, MemStoreSizing memstoreSizing) {<a name="line.334"></a>
-<span class="sourceLineNo">335</span>    updateMetaInfo(cellToAdd, succ, (getMemStoreLAB()!=null), memstoreSizing);<a name="line.335"></a>
-<span class="sourceLineNo">336</span>  }<a name="line.336"></a>
-<span class="sourceLineNo">337</span><a name="line.337"></a>
-<span class="sourceLineNo">338</span>  /**<a name="line.338"></a>
-<span class="sourceLineNo">339</span>   * @return The increase in heap size because of this cell addition. This includes this cell POJO's<a name="line.339"></a>
-<span class="sourceLineNo">340</span>   *         heap size itself and additional overhead because of addition on to CSLM.<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   */<a name="line.341"></a>
-<span class="sourceLineNo">342</span>  protected long heapSizeChange(Cell cell, boolean succ) {<a name="line.342"></a>
-<span class="sourceLineNo">343</span>    long res = 0;<a name="line.343"></a>
-<span class="sourceLineNo">344</span>    if (succ) {<a name="line.344"></a>
-<span class="sourceLineNo">345</span>      boolean onHeap = true;<a name="line.345"></a>
-<span class="sourceLineNo">346</span>      MemStoreLAB memStoreLAB = getMemStoreLAB();<a name="line.346"></a>
-<span class="sourceLineNo">347</span>      if(memStoreLAB != null) {<a name="line.347"></a>
-<span class="sourceLineNo">348</span>        onHeap = memStoreLAB.isOnHeap();<a name="line.348"></a>
-<span class="sourceLineNo">349</span>      }<a name="line.349"></a>
-<span class="sourceLineNo">350</span>      res += indexEntryOnHeapSize(onHeap);<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      if(onHeap) {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>        res += PrivateCellUtil.estimatedSizeOfCell(cell);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>      }<a name="line.353"></a>
-<span class="sourceLineNo">354</span>      res = ClassSize.align(res);<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    }<a name="line.355"></a>
-<span class="sourceLineNo">356</span>    return res;<a name="line.356"></a>
-<span class="sourceLineNo">357</span>  }<a name="line.357"></a>
-<span class="sourceLineNo">358</span><a name="line.358"></a>
-<span class="sourceLineNo">359</span>  protected long offHeapSizeChange(Cell cell, boolean succ) {<a name="line.359"></a>
-<span class="sourceLineNo">360</span>    long res = 0;<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    if (succ) {<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      boolean offHeap = false;<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      MemStoreLAB memStoreLAB = getMemStoreLAB();<a name="line.363"></a>
-<span class="sourceLineNo">364</span>      if(memStoreLAB != null) {<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        offHeap = memStoreLAB.isOffHeap();<a name="line.365"></a>
-<span class="sourceLineNo">366</span>      }<a name="line.366"></a>
-<span class="sourceLineNo">367</span>      res += indexEntryOffHeapSize(offHeap);<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      if(offHeap) {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>        res += PrivateCellUtil.estimatedSizeOfCell(cell);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>      }<a name="line.370"></a>
-<span class="sourceLineNo">371</span>      res = ClassSize.align(res);<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    }<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    return res;<a name="line.373"></a>
-<span class="sourceLineNo">374</span>  }<a name="line.374"></a>
-<span class="sourceLineNo">375</span><a name="line.375"></a>
-<span class="sourceLineNo">376</span>  protected long indexEntryOnHeapSize(boolean onHeap) {<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    // in most cases index is allocated on-heap<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    // override this method when it is not always the case, e.g., in CCM<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    return indexEntrySize();<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  }<a name="line.380"></a>
+<span class="sourceLineNo">295</span>  protected void internalAdd(Cell cell, boolean mslabUsed, MemStoreSizing memstoreSizing) {<a name="line.295"></a>
+<span class="sourceLineNo">296</span>    boolean succ = getCellSet().add(cell);<a name="line.296"></a>
+<span class="sourceLineNo">297</span>    updateMetaInfo(cell, succ, mslabUsed, memstoreSizing);<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  }<a name="line.298"></a>
+<span class="sourceLineNo">299</span><a name="line.299"></a>
+<span class="sourceLineNo">300</span>  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,<a name="line.300"></a>
+<span class="sourceLineNo">301</span>      MemStoreSizing memstoreSizing) {<a name="line.301"></a>
+<span class="sourceLineNo">302</span>    long cellSize = 0;<a name="line.302"></a>
+<span class="sourceLineNo">303</span>    // If there's already a same cell in the CellSet and we are using MSLAB, we must count in the<a name="line.303"></a>
+<span class="sourceLineNo">304</span>    // MSLAB allocation size as well, or else there will be memory leak (occupied heap size larger<a name="line.304"></a>
+<span class="sourceLineNo">305</span>    // than the counted number)<a name="line.305"></a>
+<span class="sourceLineNo">306</span>    if (succ || mslabUsed) {<a name="line.306"></a>
+<span class="sourceLineNo">307</span>      cellSize = getCellLength(cellToAdd);<a name="line.307"></a>
+<span class="sourceLineNo">308</span>    }<a name="line.308"></a>
+<span class="sourceLineNo">309</span>    long heapSize = heapSizeChange(cellToAdd, succ);<a name="line.309"></a>
+<span class="sourceLineNo">310</span>    long offHeapSize = offHeapSizeChange(cellToAdd, succ);<a name="line.310"></a>
+<span class="sourceLineNo">311</span>    incSize(cellSize, heapSize, offHeapSize);<a name="line.311"></a>
+<span class="sourceLineNo">312</span>    if (memstoreSizing != null) {<a name="line.312"></a>
+<span class="sourceLineNo">313</span>      memstoreSizing.incMemStoreSize(cellSize, heapSize, offHeapSize);<a name="line.313"></a>
+<span class="sourceLineNo">314</span>    }<a name="line.314"></a>
+<span class="sourceLineNo">315</span>    getTimeRangeTracker().includeTimestamp(cellToAdd);<a name="line.315"></a>
+<span class="sourceLineNo">316</span>    minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId());<a name="line.316"></a>
+<span class="sourceLineNo">317</span>    // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call.<a name="line.317"></a>
+<span class="sourceLineNo">318</span>    // When we use ACL CP or Visibility CP which deals with Tags during<a name="line.318"></a>
+<span class="sourceLineNo">319</span>    // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not<a name="line.319"></a>
+<span class="sourceLineNo">320</span>    // parse the byte[] to identify the tags length.<a name="line.320"></a>
+<span class="sourceLineNo">321</span>    if (cellToAdd.getTagsLength() &gt; 0) {<a name="line.321"></a>
+<span class="sourceLineNo">322</span>      tagsPresent = true;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>    }<a name="line.323"></a>
+<span class="sourceLineNo">324</span>  }<a name="line.324"></a>
+<span class="sourceLineNo">325</span><a name="line.325"></a>
+<span class="sourceLineNo">326</span>  protected void updateMetaInfo(Cell cellToAdd, boolean succ, MemStoreSizing memstoreSizing) {<a name="line.326"></a>
+<span class="sourceLineNo">327</span>    updateMetaInfo(cellToAdd, succ, (getMemStoreLAB()!=null), memstoreSizing);<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  }<a name="line.328"></a>
+<span class="sourceLineNo">329</span><a name="line.329"></a>
+<span class="sourceLineNo">330</span>  /**<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   * @return The increase in heap size because of this cell addition. This includes this cell POJO's<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   *         heap size itself and additional overhead because of addition on to CSLM.<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   */<a name="line.333"></a>
+<span class="sourceLineNo">334</span>  protected long heapSizeChange(Cell cell, boolean succ) {<a name="line.334"></a>
+<span class="sourceLineNo">335</span>    long res = 0;<a name="line.335"></a>
+<span class="sourceLineNo">336</span>    if (succ) {<a name="line.336"></a>
+<span class="sourceLineNo">337</span>      boolean onHeap = true;<a name="line.337"></a>
+<span class="sourceLineNo">338</span>      MemStoreLAB memStoreLAB = getMemStoreLAB();<a name="line.338"></a>
+<span class="sourceLineNo">339</span>      if(memStoreLAB != null) {<a name="line.339"></a>
+<span class="sourceLineNo">340</span>        onHeap = memStoreLAB.isOnHeap();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>      }<a name="line.341"></a>
+<span class="sourceLineNo">342</span>      res += indexEntryOnHeapSize(onHeap);<a name="line.342"></a>
+<span class="sourceLineNo">343</span>      if(onHeap) {<a name="line.343"></a>
+<span class="sourceLineNo">344</span>        res += PrivateCellUtil.estimatedSizeOfCell(cell);<a name="line.344"></a>
+<span class="sourceLineNo">345</span>      }<a name="line.345"></a>
+<span class="sourceLineNo">346</span>      res = ClassSize.align(res);<a name="line.346"></a>
+<span class="sourceLineNo">347</span>    }<a name="line.347"></a>
+<span class="sourceLineNo">348</span>    return res;<a name="line.348"></a>
+<span class="sourceLineNo">349</span>  }<a name="line.349"></a>
+<span class="sourceLineNo">350</span><a name="line.350"></a>
+<span class="sourceLineNo">351</span>  protected long offHeapSizeChange(Cell cell, boolean succ) {<a name="line.351"></a>
+<span class="sourceLineNo">352</span>    long res = 0;<a name="line.352"></a>
+<span class="sourceLineNo">353</span>    if (succ) {<a name="line.353"></a>
+<span class="sourceLineNo">354</span>      boolean offHeap = false;<a name="line.354"></a>
+<span class="sourceLineNo">355</span>      MemStoreLAB memStoreLAB = getMemStoreLAB();<a name="line.355"></a>
+<span class="sourceLineNo">356</span>      if(memStoreLAB != null) {<a name="line.356"></a>
+<span class="sourceLineNo">357</span>        offHeap = memStoreLAB.isOffHeap();<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      }<a name="line.358"></a>
+<span class="sourceLineNo">359</span>      res += indexEntryOffHeapSize(offHeap);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>      if(offHeap) {<a name="line.360"></a>
+<span class="sourceLineNo">361</span>        res += PrivateCellUtil.estimatedSizeOfCell(cell);<a name="line.361"></a>
+<span class="sourceLineNo">362</span>      }<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      res = ClassSize.align(res);<a name="line.363"></a>
+<span class="sourceLineNo">364</span>    }<a name="line.364"></a>
+<span class="sourceLineNo">365</span>    return res;<a name="line.365"></a>
+<span class="sourceLineNo">366</span>  }<a name="line.366"></a>
+<span class="sourceLineNo">367</span><a name="line.367"></a>
+<span class="sourceLineNo">368</span>  protected long indexEntryOnHeapSize(boolean onHeap) {<a name="line.368"></a>
+<span class="sourceLineNo">369</span>    // in most cases index is allocated on-heap<a name="line.369"></a>
+<span class="sourceLineNo">370</span>    // override this method when it is not always the case, e.g., in CCM<a name="line.370"></a>
+<span class="sourceLineNo">371</span>    return indexEntrySize();<a name="line.371"></a>
+<span class="sourceLineNo">372</span>  }<a name="line.372"></a>
+<span class="sourceLineNo">373</span><a name="line.373"></a>
+<span class="sourceLineNo">374</span>  protected long indexEntryOffHeapSize(boolean offHeap) {<a name="line.374"></a>
+<span class="sourceLineNo">375</span>    // in most cases index is allocated on-heap<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    // override this method when it is not always the case, e.g., in CCM<a name="line.376"></a>
+<span class="sourceLineNo">377</span>    return 0;<a name="line.377"></a>
+<span class="sourceLineNo">378</span>  }<a name="line.378"></a>
+<span class="sourceLineNo">379</span><a name="line.379"></a>
+<span class="sourceLineNo">380</span>  protected abstract long indexEntrySize();<a name="line.380"></a>
 <span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  protected long indexEntryOffHeapSize(boolean offHeap) {<a name="line.382"></a>
-<span class="sourceLineNo">383</span>    // in most cases index is allocated on-heap<a name="line.383"></a>
-<span class="sourceLineNo">384</span>    // override this method when it is not always the case, e.g., in CCM<a name="line.384"></a>
-<span class="sourceLineNo">385</span>    return 0;<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  }<a name="line.386"></a>
-<span class="sourceLineNo">387</span><a name="line.387"></a>
-<span class="sourceLineNo">388</span>  protected abstract long indexEntrySize();<a name="line.388"></a>
-<span class="sourceLineNo">389</span><a name="line.389"></a>
-<span class="sourceLineNo">390</span>  /**<a name="line.390"></a>
-<span class="sourceLineNo">391</span>   * Returns a subset of the segment cell set, which starts with the given cell<a name="line.391"></a>
-<span class="sourceLineNo">392</span>   * @param firstCell a cell in the segment<a name="line.392"></a>
-<span class="sourceLineNo">393</span>   * @return a subset of the segment cell set, which starts with the given cell<a name="line.393"></a>
-<span class="sourceLineNo">394</span>   */<a name="line.394"></a>
-<span class="sourceLineNo">395</span>  protected SortedSet&lt;Cell&gt; tailSet(Cell firstCell) {<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    return getCellSet().tailSet(firstCell);<a name="line.396"></a>
-<span class="sourceLineNo">397</span>  }<a name="line.397"></a>
-<span class="sourceLineNo">398</span><a name="line.398"></a>
-<span class="sourceLineNo">399</span>  @VisibleForTesting<a name="line.399"></a>
-<span class="sourceLineNo">400</span>  MemStoreLAB getMemStoreLAB() {<a name="line.400"></a>
-<span class="sourceLineNo">401</span>    return memStoreLAB;<a name="line.401"></a>
-<span class="sourceLineNo">402</span>  }<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>  // Debug methods<a name="line.404"></a>
-<span class="sourceLineNo">405</span>  /**<a name="line.405"></a>
-<span class="sourceLineNo">406</span>   * Dumps all cells of the segment into the given log<a name="line.406"></a>
-<span class="sourceLineNo">407</span>   */<a name="line.407"></a>
-<span class="sourceLineNo">408</span>  void dump(Logger log) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>    for (Cell cell: getCellSet()) {<a name="line.409"></a>
-<span class="sourceLineNo">410</span>      log.debug(Objects.toString(cell));<a name="line.410"></a>
-<span class="sourceLineNo">411</span>    }<a name="line.411"></a>
-<span class="sourceLineNo">412</span>  }<a name="line.412"></a>
-<span class="sourceLineNo">413</span><a name="line.413"></a>
-<span class="sourceLineNo">414</span>  @Override<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  public String toString() {<a name="line.415"></a>
-<span class="sourceLineNo">416</span>    String res = "type=" + this.getClass().getSimpleName() + ", ";<a name="line.416"></a>
-<span class="sourceLineNo">417</span>    res += "empty=" + (isEmpty()? "yes": "no") + ", ";<a name="line.417"></a>
-<span class="sourceLineNo">418</span>    res += "cellCount=" + getCellsCount() + ", ";<a name="line.418"></a>
-<span class="sourceLineNo">419</span>    res += "cellSize=" + keySize() + ", ";<a name="line.419"></a>
-<span class="sourceLineNo">420</span>    res += "totalHeapSize=" + heapSize() + ", ";<a name="line.420"></a>
-<span class="sourceLineNo">421</span>    res += "min timestamp=" + timeRangeTracker.getMin() + ", ";<a name="line.421"></a>
-<span class="sourceLineNo">422</span>    res += "max timestamp=" + timeRangeTracker.getMax();<a name="line.422"></a>
-<span class="sourceLineNo">423</span>    return res;<a name="line.423"></a>
-<span class="sourceLineNo">424</span>  }<a name="line.424"></a>
-<span class="sourceLineNo">425</span>}<a name="line.425"></a>
+<span class="sourceLineNo">382</span>  /**<a name="line.382"></a>
+<span class="sourceLineNo">383</span>   * Returns a subset of the segment cell set, which starts with the given cell<a name="line.383"></a>
+<span class="sourceLineNo">384</span>   * @param firstCell a cell in the segment<a name="line.384"></a>
+<span class="sourceLineNo">385</span>   * @return a subset of the segment cell set, which starts with the given cell<a name="line.385"></a>
+<span class="sourceLineNo">386</span>   */<a name="line.386"></a>
+<span class="sourceLineNo">387</span>  protected SortedSet&lt;Cell&gt; tailSet(Cell firstCell) {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>    return getCellSet().tailSet(firstCell);<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  }<a name="line.389"></a>
+<span class="sourceLineNo">390</span><a name="line.390"></a>
+<span class="sourceLineNo">391</span>  @VisibleForTesting<a name="line.391"></a>
+<span class="sourceLineNo">392</span>  MemStoreLAB getMemStoreLAB() {<a name="line.392"></a>
+<span class="sourceLineNo">393</span>    return memStoreLAB;<a name="line.393"></a>
+<span class="sourceLineNo">394</span>  }<a name="line.394"></a>
+<span class="sourceLineNo">395</span><a name="line.395"></a>
+<span class="sourceLineNo">396</span>  // Debug methods<a name="line.396"></a>
+<span class="sourceLineNo">397</span>  /**<a name="line.397"></a>
+<span class="sourceLineNo">398</span>   * Dumps all cells of the segment into the given log<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   */<a name="line.399"></a>
+<span class="sourceLineNo">400</span>  void dump(Logger log) {<a name="line.400"></a>
+<span class="sourceLineNo">401</span>    for (Cell cell: getCellSet()) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>      log.debug(Objects.toString(cell));<a name="line.402"></a>
+<span class="sourceLineNo">403</span>    }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>  }<a name="line.404"></a>
+<span class="sourceLineNo">405</span><a name="line.405"></a>
+<span class="sourceLineNo">406</span>  @Override<a name="line.406"></a>
+<span class="sourceLineNo">407</span>  public String toString() {<a name="line.407"></a>
+<span class="sourceLineNo">408</span>    String res = "type=" + this.getClass().getSimpleName() + ", ";<a name="line.408"></a>
+<span class="sourceLineNo">409</span>    res += "empty=" + (isEmpty()? "yes": "no") + ", ";<a name="line.409"></a>
+<span class="sourceLineNo">410</span>    res += "cellCount=" + getCellsCount() + ", ";<a name="line.410"></a>
+<span class="sourceLineNo">411</span>    res += "cellSize=" + keySize() + ", ";<a name="line.411"></a>
+<span class="sourceLineNo">412</span>    res += "totalHeapSize=" + heapSize() + ", ";<a name="line.412"></a>
+<span class="sourceLineNo">413</span>    res += "min timestamp=" + timeRangeTracker.getMin() + ", ";<a name="line.413"></a>
+<span class="sourceLineNo">414</span>    res += "max timestamp=" + timeRangeTracker.getMax();<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return res;<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span>}<a name="line.417"></a>
 
 
 


[31/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index 91b5d7d..4836401 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2874">HBaseFsck.TableInfo.IntegrityFixSuggester</a>
+<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2881">HBaseFsck.TableInfo.IntegrityFixSuggester</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html" title="class in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandlerImpl</a></pre>
 </li>
 </ul>
@@ -267,7 +267,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockListLast">
 <li class="blockList">
 <h4>errors</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2875">errors</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2882">errors</a></pre>
 </li>
 </ul>
 </li>
@@ -284,7 +284,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockListLast">
 <li class="blockList">
 <h4>IntegrityFixSuggester</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2877">IntegrityFixSuggester</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2884">IntegrityFixSuggester</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
                       <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors)</pre>
 </li>
 </ul>
@@ -302,7 +302,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionStartKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2883">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2890">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
                                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleRegionStartKeyNotEmpty-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling case where a Table has a first region that does not
@@ -327,7 +327,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionEndKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2891">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2898">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleRegionEndKeyNotEmpty-byte:A-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling case where a Table has a last region that does not
@@ -351,7 +351,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleDegenerateRegion</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2898">handleDegenerateRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2905">handleDegenerateRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
                             throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleDegenerateRegion-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling a region that has the same start and end key.</div>
@@ -373,7 +373,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleDuplicateStartKeys</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2904">handleDuplicateStartKeys</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2911">handleDuplicateStartKeys</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
                                      <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r2)
                               throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleDuplicateStartKeys-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
@@ -398,7 +398,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleSplit</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2916">handleSplit</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2923">handleSplit</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r2)
                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html#handleSplit-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandler</a></code></span></div>
@@ -419,7 +419,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleOverlapInRegionChain</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2928">handleOverlapInRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi1,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2935">handleOverlapInRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi1,
                                        <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi2)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleOverlapInRegionChain-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
@@ -446,7 +446,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockListLast">
 <li class="blockList">
 <h4>handleHoleInRegionChain</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2935">handleHoleInRegionChain</a>(byte[]&nbsp;holeStart,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2942">handleHoleInRegionChain</a>(byte[]&nbsp;holeStart,
                                     byte[]&nbsp;holeStop)
                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleHoleInRegionChain-byte:A-byte:A-">TableIntegrityErrorHandlerImpl</a></code></span></div>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index 60a8d07..67a9310 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2779">HBaseFsck.TableInfo</a>
+<pre>public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2786">HBaseFsck.TableInfo</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 <div class="block">Maintain information about a particular table.</div>
 </li>
@@ -293,7 +293,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>tableName</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2780">tableName</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2787">tableName</a></pre>
 </li>
 </ul>
 <a name="deployedOn">
@@ -302,7 +302,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedOn</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/TreeSet.html?is-external=true" title="class or interface in java.util">TreeSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2781">deployedOn</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/TreeSet.html?is-external=true" title="class or interface in java.util">TreeSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2788">deployedOn</a></pre>
 </li>
 </ul>
 <a name="backwards">
@@ -311,7 +311,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>backwards</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2784">backwards</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2791">backwards</a></pre>
 </li>
 </ul>
 <a name="sidelinedRegions">
@@ -320,7 +320,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>sidelinedRegions</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;org.apache.hadoop.fs.Path,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2787">sidelinedRegions</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;org.apache.hadoop.fs.Path,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2794">sidelinedRegions</a></pre>
 </li>
 </ul>
 <a name="sc">
@@ -329,7 +329,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>sc</h4>
-<pre>final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RegionSplitCalculator.html" title="class in org.apache.hadoop.hbase.util">RegionSplitCalculator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2790">sc</a></pre>
+<pre>final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RegionSplitCalculator.html" title="class in org.apache.hadoop.hbase.util">RegionSplitCalculator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2797">sc</a></pre>
 </li>
 </ul>
 <a name="htds">
@@ -338,7 +338,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>htds</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2793">htds</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2800">htds</a></pre>
 </li>
 </ul>
 <a name="overlapGroups">
@@ -347,7 +347,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>overlapGroups</h4>
-<pre>final&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2796">overlapGroups</a></pre>
+<pre>final&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2803">overlapGroups</a></pre>
 </li>
 </ul>
 <a name="regionsFromMeta">
@@ -356,7 +356,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>regionsFromMeta</h4>
-<pre>private&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2800">regionsFromMeta</a></pre>
+<pre>private&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2807">regionsFromMeta</a></pre>
 </li>
 </ul>
 </li>
@@ -373,7 +373,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>TableInfo</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2802">TableInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;name)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2809">TableInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;name)</pre>
 </li>
 </ul>
 </li>
@@ -390,7 +390,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getHTD</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2810">getHTD</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2817">getHTD</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>descriptor common to all regions.  null if are none or multiple!</dd>
@@ -403,7 +403,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>addRegionInfo</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2820">addRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hir)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2827">addRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hir)</pre>
 </li>
 </ul>
 <a name="addServer-org.apache.hadoop.hbase.ServerName-">
@@ -412,7 +412,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>addServer</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2845">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;server)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2852">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;server)</pre>
 </li>
 </ul>
 <a name="getName--">
@@ -421,7 +421,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getName</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2849">getName</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2856">getName</a>()</pre>
 </li>
 </ul>
 <a name="getNumRegions--">
@@ -430,7 +430,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getNumRegions</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2853">getNumRegions</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2860">getNumRegions</a>()</pre>
 </li>
 </ul>
 <a name="getRegionsFromMeta--">
@@ -439,7 +439,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionsFromMeta</h4>
-<pre>public&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2857">getRegionsFromMeta</a>()</pre>
+<pre>public&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2864">getRegionsFromMeta</a>()</pre>
 </li>
 </ul>
 <a name="checkRegionChain-org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler-">
@@ -448,7 +448,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>checkRegionChain</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3312">checkRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3319">checkRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler)
                          throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Check the region chain (from META) of this table.  We are looking for
  holes, overlaps, and cycles.</div>
@@ -466,7 +466,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>handleOverlapsParallel</h4>
-<pre>private&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3442">handleOverlapsParallel</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler,
+<pre>private&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3449">handleOverlapsParallel</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler,
                                        byte[]&nbsp;prevKey)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
@@ -481,7 +481,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>dump</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3479">dump</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;byte[]&gt;&nbsp;splits,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3486">dump</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;byte[]&gt;&nbsp;splits,
           org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;regions)</pre>
 <div class="block">This dumps data in a visually reasonable way for visual debugging</div>
 <dl>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index b55c738..dbc855f 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4340">HBaseFsck.WorkItemHdfsDir</a>
+<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4348">HBaseFsck.WorkItemHdfsDir</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 <div class="block">Contact hdfs and get all information about specified table directory into
@@ -218,7 +218,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>tableDir</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FileStatus <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4341">tableDir</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.FileStatus <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4349">tableDir</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -227,7 +227,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4342">errors</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4350">errors</a></pre>
 </li>
 </ul>
 <a name="fs">
@@ -236,7 +236,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>fs</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FileSystem <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4343">fs</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.FileSystem <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4351">fs</a></pre>
 </li>
 </ul>
 </li>
@@ -253,7 +253,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemHdfsDir</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4345">WorkItemHdfsDir</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4353">WorkItemHdfsDir</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                 <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors,
                 org.apache.hadoop.fs.FileStatus&nbsp;status)</pre>
 </li>
@@ -272,7 +272,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4353">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4361">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true" title="class or interface in java.lang">InterruptedException</a>,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true" title="class or interface in java.util.concurrent">ExecutionException</a></pre>
 <dl>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index 94d4062..9a926cc 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4460">HBaseFsck.WorkItemHdfsRegionInfo</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4468">HBaseFsck.WorkItemHdfsRegionInfo</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 <div class="block">Contact hdfs and get all information about specified table directory into
@@ -218,7 +218,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>hbi</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4461">hbi</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4469">hbi</a></pre>
 </li>
 </ul>
 <a name="hbck">
@@ -227,7 +227,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>hbck</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4462">hbck</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4470">hbck</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -236,7 +236,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4463">errors</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4471">errors</a></pre>
 </li>
 </ul>
 </li>
@@ -253,7 +253,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemHdfsRegionInfo</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4465">WorkItemHdfsRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4473">WorkItemHdfsRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi,
                        <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a>&nbsp;hbck,
                        <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors)</pre>
 </li>
@@ -272,7 +272,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4472">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4480">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index 50bd593..8ef61a1 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2760">HBaseFsck.WorkItemOverlapMerge</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2767">HBaseFsck.WorkItemOverlapMerge</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 </li>
@@ -211,7 +211,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>handler</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2761">handler</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2768">handler</a></pre>
 </li>
 </ul>
 <a name="overlapgroup">
@@ -220,7 +220,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>overlapgroup</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2762">overlapgroup</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2769">overlapgroup</a></pre>
 </li>
 </ul>
 </li>
@@ -237,7 +237,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemOverlapMerge</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2764">WorkItemOverlapMerge</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlapgroup,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2771">WorkItemOverlapMerge</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlapgroup,
                      <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler)</pre>
 </li>
 </ul>
@@ -255,7 +255,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2770">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2777">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index 851a768..cba31d9 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4275">HBaseFsck.WorkItemRegion</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4283">HBaseFsck.WorkItemRegion</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 <div class="block">Contact a region server and get all information from it</div>
@@ -226,7 +226,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>hbck</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4276">hbck</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4284">hbck</a></pre>
 </li>
 </ul>
 <a name="rsinfo">
@@ -235,7 +235,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>rsinfo</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4277">rsinfo</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4285">rsinfo</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -244,7 +244,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4278">errors</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4286">errors</a></pre>
 </li>
 </ul>
 <a name="connection">
@@ -253,7 +253,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>connection</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4279">connection</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4287">connection</a></pre>
 </li>
 </ul>
 </li>
@@ -270,7 +270,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemRegion</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4281">WorkItemRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a>&nbsp;hbck,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4289">WorkItemRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a>&nbsp;hbck,
                <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;info,
                <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors,
                <a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a>&nbsp;connection)</pre>
@@ -290,7 +290,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4290">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4298">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -306,7 +306,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>filterRegions</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4324">filterRegions</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;regions)</pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4332">filterRegions</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;regions)</pre>
 </li>
 </ul>
 </li>


[21/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a n

<TRUNCATED>

[11/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts());<a name="line.418">

<TRUNCATED>

[08/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index c370eb9..e1bc325 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -6,7 +6,7 @@
 </head>
 <body>
 <div class="sourceContainer">
-<pre><span class="sourceLineNo">001</span>/**<a name="line.1"></a>
+<pre><span class="sourceLineNo">001</span>/*<a name="line.1"></a>
 <span class="sourceLineNo">002</span> * Licensed to the Apache Software Foundation (ASF) under one<a name="line.2"></a>
 <span class="sourceLineNo">003</span> * or more contributor license agreements.  See the NOTICE file<a name="line.3"></a>
 <span class="sourceLineNo">004</span> * distributed with this work for additional information<a name="line.4"></a>
@@ -144,5002 +144,5047 @@
 <span class="sourceLineNo">136</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.136"></a>
 <span class="sourceLineNo">137</span>import org.apache.hadoop.util.Tool;<a name="line.137"></a>
 <span class="sourceLineNo">138</span>import org.apache.hadoop.util.ToolRunner;<a name="line.138"></a>
-<span class="sourceLineNo">139</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.140"></a>
-<span class="sourceLineNo">141</span>import org.apache.zookeeper.KeeperException;<a name="line.141"></a>
-<span class="sourceLineNo">142</span>import org.slf4j.Logger;<a name="line.142"></a>
-<span class="sourceLineNo">143</span>import org.slf4j.LoggerFactory;<a name="line.143"></a>
-<span class="sourceLineNo">144</span><a name="line.144"></a>
-<span class="sourceLineNo">145</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.145"></a>
-<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>/**<a name="line.156"></a>
-<span class="sourceLineNo">157</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.157"></a>
-<span class="sourceLineNo">158</span> * table integrity problems in a corrupted HBase.<a name="line.158"></a>
-<span class="sourceLineNo">159</span> * &lt;p&gt;<a name="line.159"></a>
-<span class="sourceLineNo">160</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.160"></a>
-<span class="sourceLineNo">161</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.161"></a>
-<span class="sourceLineNo">162</span> * accordance.<a name="line.162"></a>
-<span class="sourceLineNo">163</span> * &lt;p&gt;<a name="line.163"></a>
-<span class="sourceLineNo">164</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.164"></a>
-<span class="sourceLineNo">165</span> * one region of a table.  This means there are no individual degenerate<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * or backwards regions; no holes between regions; and that there are no<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * overlapping regions.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * The general repair strategy works in two phases:<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * &lt;ol&gt;<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.171"></a>
-<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * &lt;/ol&gt;<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * &lt;p&gt;<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * a new region is created and all data is merged into the new region.<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * an offline fashion.<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * &lt;p&gt;<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * with proper state in the master.<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * &lt;p&gt;<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * first be called successfully.  Much of the region consistency information<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * is transient and less risky to repair.<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * &lt;p&gt;<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * {@link #printUsageAndExit()} for more details.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> */<a name="line.200"></a>
-<span class="sourceLineNo">201</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.201"></a>
-<span class="sourceLineNo">202</span>@InterfaceStability.Evolving<a name="line.202"></a>
-<span class="sourceLineNo">203</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.203"></a>
-<span class="sourceLineNo">204</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.204"></a>
-<span class="sourceLineNo">205</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.206"></a>
-<span class="sourceLineNo">207</span>  private static boolean rsSupportsOffline = true;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.210"></a>
-<span class="sourceLineNo">211</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.211"></a>
-<span class="sourceLineNo">212</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span><a name="line.223"></a>
-<span class="sourceLineNo">224</span>  /**********************<a name="line.224"></a>
-<span class="sourceLineNo">225</span>   * Internal resources<a name="line.225"></a>
-<span class="sourceLineNo">226</span>   **********************/<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private ClusterMetrics status;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private ClusterConnection connection;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private Admin admin;<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private Table meta;<a name="line.231"></a>
-<span class="sourceLineNo">232</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.232"></a>
-<span class="sourceLineNo">233</span>  protected ExecutorService executor;<a name="line.233"></a>
-<span class="sourceLineNo">234</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.234"></a>
-<span class="sourceLineNo">235</span>  private HFileCorruptionChecker hfcc;<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private int retcode = 0;<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private Path HBCK_LOCK_PATH;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private FSDataOutputStream hbckOutFd;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // successful<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.242"></a>
-<span class="sourceLineNo">243</span><a name="line.243"></a>
-<span class="sourceLineNo">244</span>  /***********<a name="line.244"></a>
-<span class="sourceLineNo">245</span>   * Options<a name="line.245"></a>
-<span class="sourceLineNo">246</span>   ***********/<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private static boolean details = false; // do we display the full report<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.251"></a>
-<span class="sourceLineNo">252</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.252"></a>
-<span class="sourceLineNo">253</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.254"></a>
-<span class="sourceLineNo">255</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.256"></a>
-<span class="sourceLineNo">257</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.257"></a>
-<span class="sourceLineNo">258</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.258"></a>
-<span class="sourceLineNo">259</span>  private boolean removeParents = false; // remove split parents<a name="line.259"></a>
-<span class="sourceLineNo">260</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.260"></a>
-<span class="sourceLineNo">261</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.264"></a>
-<span class="sourceLineNo">265</span><a name="line.265"></a>
-<span class="sourceLineNo">266</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  // hbase:meta are always checked<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  // maximum number of overlapping regions to sideline<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private Path sidelineDir = null;<a name="line.273"></a>
-<span class="sourceLineNo">274</span><a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private static boolean summary = false; // if we want to print less output<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean checkMetaOnly = false;<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean checkRegionBoundaries = false;<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>  /*********<a name="line.281"></a>
-<span class="sourceLineNo">282</span>   * State<a name="line.282"></a>
-<span class="sourceLineNo">283</span>   *********/<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  final private ErrorReporter errors;<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  int fixes = 0;<a name="line.285"></a>
-<span class="sourceLineNo">286</span><a name="line.286"></a>
-<span class="sourceLineNo">287</span>  /**<a name="line.287"></a>
-<span class="sourceLineNo">288</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.288"></a>
-<span class="sourceLineNo">289</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.289"></a>
-<span class="sourceLineNo">290</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.290"></a>
-<span class="sourceLineNo">291</span>   */<a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.294"></a>
-<span class="sourceLineNo">295</span><a name="line.295"></a>
-<span class="sourceLineNo">296</span>  /**<a name="line.296"></a>
-<span class="sourceLineNo">297</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.297"></a>
-<span class="sourceLineNo">298</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * to prevent dupes.<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *<a name="line.300"></a>
-<span class="sourceLineNo">301</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.301"></a>
-<span class="sourceLineNo">302</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.302"></a>
-<span class="sourceLineNo">303</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.303"></a>
-<span class="sourceLineNo">304</span>   * the meta table<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   */<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  /**<a name="line.308"></a>
-<span class="sourceLineNo">309</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.309"></a>
-<span class="sourceLineNo">310</span>   */<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.311"></a>
-<span class="sourceLineNo">312</span><a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.316"></a>
-<span class="sourceLineNo">317</span><a name="line.317"></a>
-<span class="sourceLineNo">318</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">139</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.139"></a>
+<span class="sourceLineNo">140</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.140"></a>
+<span class="sourceLineNo">141</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.141"></a>
+<span class="sourceLineNo">142</span>import org.apache.zookeeper.KeeperException;<a name="line.142"></a>
+<span class="sourceLineNo">143</span>import org.slf4j.Logger;<a name="line.143"></a>
+<span class="sourceLineNo">144</span>import org.slf4j.LoggerFactory;<a name="line.144"></a>
+<span class="sourceLineNo">145</span><a name="line.145"></a>
+<span class="sourceLineNo">146</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.146"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.152"></a>
+<span class="sourceLineNo">153</span><a name="line.153"></a>
+<span class="sourceLineNo">154</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>/**<a name="line.157"></a>
+<span class="sourceLineNo">158</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.158"></a>
+<span class="sourceLineNo">159</span> * table integrity problems in a corrupted HBase.<a name="line.159"></a>
+<span class="sourceLineNo">160</span> * &lt;p&gt;<a name="line.160"></a>
+<span class="sourceLineNo">161</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.161"></a>
+<span class="sourceLineNo">162</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.162"></a>
+<span class="sourceLineNo">163</span> * accordance.<a name="line.163"></a>
+<span class="sourceLineNo">164</span> * &lt;p&gt;<a name="line.164"></a>
+<span class="sourceLineNo">165</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.165"></a>
+<span class="sourceLineNo">166</span> * one region of a table.  This means there are no individual degenerate<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * or backwards regions; no holes between regions; and that there are no<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * overlapping regions.<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * &lt;p&gt;<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * The general repair strategy works in two phases:<a name="line.170"></a>
+<span class="sourceLineNo">171</span> * &lt;ol&gt;<a name="line.171"></a>
+<span class="sourceLineNo">172</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.172"></a>
+<span class="sourceLineNo">173</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * &lt;/ol&gt;<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * &lt;p&gt;<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * a new region is created and all data is merged into the new region.<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * &lt;p&gt;<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * an offline fashion.<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * with proper state in the master.<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * &lt;p&gt;<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * first be called successfully.  Much of the region consistency information<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * is transient and less risky to repair.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * &lt;p&gt;<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * {@link #printUsageAndExit()} for more details.<a name="line.200"></a>
+<span class="sourceLineNo">201</span> */<a name="line.201"></a>
+<span class="sourceLineNo">202</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.202"></a>
+<span class="sourceLineNo">203</span>@InterfaceStability.Evolving<a name="line.203"></a>
+<span class="sourceLineNo">204</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.204"></a>
+<span class="sourceLineNo">205</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.205"></a>
+<span class="sourceLineNo">206</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.207"></a>
+<span class="sourceLineNo">208</span>  private static boolean rsSupportsOffline = true;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.211"></a>
+<span class="sourceLineNo">212</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.212"></a>
+<span class="sourceLineNo">213</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.214"></a>
+<span class="sourceLineNo">215</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.215"></a>
+<span class="sourceLineNo">216</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
+<span class="sourceLineNo">224</span><a name="line.224"></a>
+<span class="sourceLineNo">225</span>  /**********************<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * Internal resources<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   **********************/<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  private ClusterMetrics status;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private ClusterConnection connection;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private Admin admin;<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private Table meta;<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  protected ExecutorService executor;<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  private HFileCorruptionChecker hfcc;<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private int retcode = 0;<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private Path HBCK_LOCK_PATH;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private FSDataOutputStream hbckOutFd;<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.240"></a>
+<span class="sourceLineNo">241</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.241"></a>
+<span class="sourceLineNo">242</span>  // successful<a name="line.242"></a>
+<span class="sourceLineNo">243</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.243"></a>
+<span class="sourceLineNo">244</span><a name="line.244"></a>
+<span class="sourceLineNo">245</span>  // Unsupported options in HBase 2.0+<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.246"></a>
+<span class="sourceLineNo">247</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.247"></a>
+<span class="sourceLineNo">248</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.248"></a>
+<span class="sourceLineNo">249</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.249"></a>
+<span class="sourceLineNo">250</span><a name="line.250"></a>
+<span class="sourceLineNo">251</span>  /***********<a name="line.251"></a>
+<span class="sourceLineNo">252</span>   * Options<a name="line.252"></a>
+<span class="sourceLineNo">253</span>   ***********/<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private static boolean details = false; // do we display the full report<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.260"></a>
+<span class="sourceLineNo">261</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.261"></a>
+<span class="sourceLineNo">262</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.263"></a>
+<span class="sourceLineNo">264</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.265"></a>
+<span class="sourceLineNo">266</span>  private boolean removeParents = false; // remove split parents<a name="line.266"></a>
+<span class="sourceLineNo">267</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.267"></a>
+<span class="sourceLineNo">268</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.268"></a>
+<span class="sourceLineNo">269</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.269"></a>
+<span class="sourceLineNo">270</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.271"></a>
+<span class="sourceLineNo">272</span><a name="line.272"></a>
+<span class="sourceLineNo">273</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  // hbase:meta are always checked<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  // maximum number of overlapping regions to sideline<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private Path sidelineDir = null;<a name="line.280"></a>
+<span class="sourceLineNo">281</span><a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private static boolean summary = false; // if we want to print less output<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean checkMetaOnly = false;<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean checkRegionBoundaries = false;<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean ignorePreCheckPermission = false; // if true, skip the pre-check permission check<a name="line.286"></a>
+<span class="sourceLineNo">287</span><a name="line.287"></a>
+<span class="sourceLineNo">288</span>  /*********<a name="line.288"></a>
+<span class="sourceLineNo">289</span>   * State<a name="line.289"></a>
+<span class="sourceLineNo">290</span>   *********/<a name="line.290"></a>
+<span class="sourceLineNo">291</span>  final private ErrorReporter errors;<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  int fixes = 0;<a name="line.292"></a>
+<span class="sourceLineNo">293</span><a name="line.293"></a>
+<span class="sourceLineNo">294</span>  /**<a name="line.294"></a>
+<span class="sourceLineNo">295</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.295"></a>
+<span class="sourceLineNo">296</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.296"></a>
+<span class="sourceLineNo">297</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.297"></a>
+<span class="sourceLineNo">298</span>   */<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.301"></a>
+<span class="sourceLineNo">302</span><a name="line.302"></a>
+<span class="sourceLineNo">303</span>  /**<a name="line.303"></a>
+<span class="sourceLineNo">304</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.304"></a>
+<span class="sourceLineNo">305</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.305"></a>
+<span class="sourceLineNo">306</span>   * to prevent dupes.<a name="line.306"></a>
+<span class="sourceLineNo">307</span>   *<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.309"></a>
+<span class="sourceLineNo">310</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.310"></a>
+<span class="sourceLineNo">311</span>   * the meta table<a name="line.311"></a>
+<span class="sourceLineNo">312</span>   */<a name="line.312"></a>
+<span class="sourceLineNo">313</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.313"></a>
+<span class="sourceLineNo">314</span><a name="line.314"></a>
+<span class="sourceLineNo">315</span>  /**<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.318"></a>
 <span class="sourceLineNo">319</span><a name="line.319"></a>
-<span class="sourceLineNo">320</span>  private ZKWatcher zkw = null;<a name="line.320"></a>
-<span class="sourceLineNo">321</span>  private String hbckEphemeralNodePath = null;<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private boolean hbckZodeCreated = false;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  /**<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * Constructor<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   * @param conf Configuration object<a name="line.327"></a>
-<span class="sourceLineNo">328</span>   * @throws MasterNotRunningException if the master is not running<a name="line.328"></a>
-<span class="sourceLineNo">329</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.329"></a>
-<span class="sourceLineNo">330</span>   */<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.331"></a>
-<span class="sourceLineNo">332</span>    this(conf, createThreadPool(conf));<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  }<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.335"></a>
-<span class="sourceLineNo">336</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  }<a name="line.338"></a>
-<span class="sourceLineNo">339</span><a name="line.339"></a>
-<span class="sourceLineNo">340</span>  /**<a name="line.340"></a>
-<span class="sourceLineNo">341</span>   * Constructor<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   *<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   * @param conf<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   *          Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   *           if the master is not running<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   * @throws ZooKeeperConnectionException<a name="line.347"></a>
-<span class="sourceLineNo">348</span>   *           if unable to connect to ZooKeeper<a name="line.348"></a>
-<span class="sourceLineNo">349</span>   */<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.350"></a>
-<span class="sourceLineNo">351</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    super(conf);<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    errors = getErrorReporter(getConf());<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    this.executor = exec;<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.355"></a>
-<span class="sourceLineNo">356</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.356"></a>
-<span class="sourceLineNo">357</span>      getConf().getInt(<a name="line.357"></a>
-<span class="sourceLineNo">358</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.358"></a>
-<span class="sourceLineNo">359</span>      getConf().getInt(<a name="line.359"></a>
-<span class="sourceLineNo">360</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.360"></a>
-<span class="sourceLineNo">361</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.361"></a>
-<span class="sourceLineNo">362</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.362"></a>
-<span class="sourceLineNo">363</span>      getConf().getInt(<a name="line.363"></a>
-<span class="sourceLineNo">364</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.364"></a>
-<span class="sourceLineNo">365</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
+<span class="sourceLineNo">320</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.321"></a>
+<span class="sourceLineNo">322</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.322"></a>
+<span class="sourceLineNo">323</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.323"></a>
+<span class="sourceLineNo">324</span><a name="line.324"></a>
+<span class="sourceLineNo">325</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  private ZKWatcher zkw = null;<a name="line.327"></a>
+<span class="sourceLineNo">328</span>  private String hbckEphemeralNodePath = null;<a name="line.328"></a>
+<span class="sourceLineNo">329</span>  private boolean hbckZodeCreated = false;<a name="line.329"></a>
+<span class="sourceLineNo">330</span><a name="line.330"></a>
+<span class="sourceLineNo">331</span>  /**<a name="line.331"></a>
+<span class="sourceLineNo">332</span>   * Constructor<a name="line.332"></a>
+<span class="sourceLineNo">333</span>   *<a name="line.333"></a>
+<span class="sourceLineNo">334</span>   * @param conf Configuration object<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * @throws MasterNotRunningException if the master is not running<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.336"></a>
+<span class="sourceLineNo">337</span>   */<a name="line.337"></a>
+<span class="sourceLineNo">338</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.338"></a>
+<span class="sourceLineNo">339</span>    this(conf, createThreadPool(conf));<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  }<a name="line.340"></a>
+<span class="sourceLineNo">341</span><a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.344"></a>
+<span class="sourceLineNo">345</span>  }<a name="line.345"></a>
+<span class="sourceLineNo">346</span><a name="line.346"></a>
+<span class="sourceLineNo">347</span>  /**<a name="line.347"></a>
+<span class="sourceLineNo">348</span>   * Constructor<a name="line.348"></a>
+<span class="sourceLineNo">349</span>   *<a name="line.349"></a>
+<span class="sourceLineNo">350</span>   * @param conf<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   *          Configuration object<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   * @throws MasterNotRunningException<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   *           if the master is not running<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws ZooKeeperConnectionException<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   *           if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.357"></a>
+<span class="sourceLineNo">358</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.358"></a>
+<span class="sourceLineNo">359</span>    super(conf);<a name="line.359"></a>
+<span class="sourceLineNo">360</span>    errors = getErrorReporter(getConf());<a name="line.360"></a>
+<span class="sourceLineNo">361</span>    this.executor = exec;<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.362"></a>
+<span class="sourceLineNo">363</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.363"></a>
+<span class="sourceLineNo">364</span>      getConf().getInt(<a name="line.364"></a>
+<span class="sourceLineNo">365</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.365"></a>
 <span class="sourceLineNo">366</span>      getConf().getInt(<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.367"></a>
-<span class="sourceLineNo">368</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    zkw = createZooKeeperWatcher();<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.372"></a>
-<span class="sourceLineNo">373</span>    RetryCounter retryCounter;<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      this.retryCounter = retryCounter;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    }<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    @Override<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    public FSDataOutputStream call() throws IOException {<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      try {<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        fs.mkdirs(tmpDir);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.387"></a>
-<span class="sourceLineNo">388</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.388"></a>
-<span class="sourceLineNo">389</span>        out.flush();<a name="line.389"></a>
-<span class="sourceLineNo">390</span>        return out;<a name="line.390"></a>
-<span class="sourceLineNo">391</span>      } catch(RemoteException e) {<a name="line.391"></a>
-<span class="sourceLineNo">392</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.392"></a>
-<span class="sourceLineNo">393</span>          return null;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>        } else {<a name="line.394"></a>
-<span class="sourceLineNo">395</span>          throw e;<a name="line.395"></a>
-<span class="sourceLineNo">396</span>        }<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      }<a name="line.397"></a>
-<span class="sourceLineNo">398</span>    }<a name="line.398"></a>
-<span class="sourceLineNo">399</span><a name="line.399"></a>
-<span class="sourceLineNo">400</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        throws IOException {<a name="line.402"></a>
-<span class="sourceLineNo">403</span><a name="line.403"></a>
-<span class="sourceLineNo">404</span>      IOException exception = null;<a name="line.404"></a>
-<span class="sourceLineNo">405</span>      do {<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        try {<a name="line.406"></a>
-<span class="sourceLineNo">407</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.407"></a>
-<span class="sourceLineNo">408</span>        } catch (IOException ioe) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.409"></a>
-<span class="sourceLineNo">410</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.410"></a>
-<span class="sourceLineNo">411</span>              + retryCounter.getMaxAttempts());<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.412"></a>
-<span class="sourceLineNo">413</span>              ioe);<a name="line.413"></a>
-<span class="sourceLineNo">414</span>          try {<a name="line.414"></a>
-<span class="sourceLineNo">415</span>            exception = ioe;<a name="line.415"></a>
-<span class="sourceLineNo">416</span>            retryCounter.sleepUntilNextRetry();<a name="line.416"></a>
-<span class="sourceLineNo">417</span>          } catch (InterruptedException ie) {<a name="line.417"></a>
-<span class="sourceLineNo">418</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.418"></a>
-<span class="sourceLineNo">419</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.419"></a>
-<span class="sourceLineNo">420</span>            .initCause(ie);<a name="line.420"></a>
-<span class="sourceLineNo">421</span>          }<a name="line.421"></a>
-<span class="sourceLineNo">422</span>        }<a name="line.422"></a>
-<span class="sourceLineNo">423</span>      } while (retryCounter.shouldRetry());<a name="line.423"></a>
-<span class="sourceLineNo">424</span><a name="line.424"></a>
-<span class="sourceLineNo">425</span>      throw exception;<a name="line.425"></a>
-<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  }<a name="line.427"></a>
-<span class="sourceLineNo">428</span><a name="line.428"></a>
-<span class="sourceLineNo">429</span>  /**<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   *<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.432"></a>
-<span class="sourceLineNo">433</span>   * @throws IOException if IO failure occurs<a name="line.433"></a>
-<span class="sourceLineNo">434</span>   */<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.436"></a>
-<span class="sourceLineNo">437</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    executor.execute(futureTask);<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    FSDataOutputStream stream = null;<a name="line.443"></a>
-<span class="sourceLineNo">444</span>    try {<a name="line.444"></a>
-<span class="sourceLineNo">445</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    } catch (ExecutionException ee) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    } catch (InterruptedException ie) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.449"></a>
-<span class="sourceLineNo">450</span>      Thread.currentThread().interrupt();<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    } catch (TimeoutException exception) {<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      // took too long to obtain lock<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.453"></a>
-<span class="sourceLineNo">454</span>      futureTask.cancel(true);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    } finally {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      executor.shutdownNow();<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    }<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    return stream;<a name="line.458"></a>
-<span class="sourceLineNo">459</span>  }<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>  private void unlockHbck() {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.462"></a>
-<span class="sourceLineNo">463</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      do {<a name="line.464"></a>
-<span class="sourceLineNo">465</span>        try {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              HBCK_LOCK_PATH, true);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          LOG.info("Finishing hbck");<a name="line.469"></a>
-<span class="sourceLineNo">470</span>          return;<a name="line.470"></a>
-<span class="sourceLineNo">471</span>        } catch (IOException ioe) {<a name="line.471"></a>
-<span class="sourceLineNo">472</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.472"></a>
-<span class="sourceLineNo">473</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.473"></a>
-<span class="sourceLineNo">474</span>              + retryCounter.getMaxAttempts());<a name="line.474"></a>
-<span class="sourceLineNo">475</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.475"></a>
-<span class="sourceLineNo">476</span>          try {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>            retryCounter.sleepUntilNextRetry();<a name="line.477"></a>
-<span class="sourceLineNo">478</span>          } catch (InterruptedException ie) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>            Thread.currentThread().interrupt();<a name="line.479"></a>
-<span class="sourceLineNo">480</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.480"></a>
-<span class="sourceLineNo">481</span>                HBCK_LOCK_PATH);<a name="line.481"></a>
-<span class="sourceLineNo">482</span>            return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>        }<a name="line.484"></a>
-<span class="sourceLineNo">485</span>      } while (retryCounter.shouldRetry());<a name="line.485"></a>
-<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
-<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
-<span class="sourceLineNo">488</span><a name="line.488"></a>
-<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * online state.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public void connect() throws IOException {<a name="line.493"></a>
-<span class="sourceLineNo">494</span><a name="line.494"></a>
-<span class="sourceLineNo">495</span>    if (isExclusive()) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>      // Grab the lock<a name="line.496"></a>
-<span class="sourceLineNo">497</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      if (hbckOutFd == null) {<a name="line.498"></a>
-<span class="sourceLineNo">499</span>        setRetCode(-1);<a name="line.499"></a>
-<span class="sourceLineNo">500</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.501"></a>
-<span class="sourceLineNo">502</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.502"></a>
-<span class="sourceLineNo">503</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      // Make sure to cleanup the lock<a name="line.506"></a>
-<span class="sourceLineNo">507</span>      hbckLockCleanup.set(true);<a name="line.507"></a>
-<span class="sourceLineNo">508</span>    }<a name="line.508"></a>
-<span class="sourceLineNo">509</span><a name="line.509"></a>
-<span class="sourceLineNo">510</span><a name="line.510"></a>
-<span class="sourceLineNo">511</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.511"></a>
-<span class="sourceLineNo">512</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    // it is available for further calls<a name="line.513"></a>
-<span class="sourceLineNo">514</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      @Override<a name="line.515"></a>
-<span class="sourceLineNo">516</span>      public void run() {<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.517"></a>
-<span class="sourceLineNo">518</span>        cleanupHbckZnode();<a name="line.518"></a>
-<span class="sourceLineNo">519</span>        unlockHbck();<a name="line.519"></a>
-<span class="sourceLineNo">520</span>      }<a name="line.520"></a>
-<span class="sourceLineNo">521</span>    });<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>    LOG.info("Launching hbck");<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.525"></a>
-<span class="sourceLineNo">526</span>    admin = connection.getAdmin();<a name="line.526"></a>
-<span class="sourceLineNo">527</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.527"></a>
-<span class="sourceLineNo">528</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.528"></a>
-<span class="sourceLineNo">529</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.529"></a>
-<span class="sourceLineNo">530</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.530"></a>
-<span class="sourceLineNo">531</span>  }<a name="line.531"></a>
-<span class="sourceLineNo">532</span><a name="line.532"></a>
-<span class="sourceLineNo">533</span>  /**<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * Get deployed regions according to the region servers.<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   */<a name="line.535"></a>
-<span class="sourceLineNo">536</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    // From the master, get a list of all known live region servers<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.538"></a>
-<span class="sourceLineNo">539</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.539"></a>
-<span class="sourceLineNo">540</span>    if (details) {<a name="line.540"></a>
-<span class="sourceLineNo">541</span>      for (ServerName rsinfo: regionServers) {<a name="line.541"></a>
-<span class="sourceLineNo">542</span>        errors.print("  " + rsinfo.getServerName());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>      }<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    }<a name="line.544"></a>
-<span class="sourceLineNo">545</span><a name="line.545"></a>
-<span class="sourceLineNo">546</span>    // From the master, get a list of all dead region servers<a name="line.546"></a>
-<span class="sourceLineNo">547</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.547"></a>
-<span class="sourceLineNo">548</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>    if (details) {<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      for (ServerName name: deadRegionServers) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        errors.print("  " + name);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      }<a name="line.552"></a>
-<span class="sourceLineNo">553</span>    }<a name="line.553"></a>
-<span class="sourceLineNo">554</span><a name="line.554"></a>
-<span class="sourceLineNo">555</span>    // Print the current master name and state<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Master: " + status.getMasterName());<a name="line.556"></a>
-<span class="sourceLineNo">557</span><a name="line.557"></a>
-<span class="sourceLineNo">558</span>    // Print the list of all backup masters<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    Collection&lt;ServerName&gt; backupMasters = status.getBackupMasterNames();<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    errors.print("Number of backup masters: " + backupMasters.size());<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (details) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      for (ServerName name: backupMasters) {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        errors.print("  " + name);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>      }<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    }<a name="line.565"></a>
-<span class="sourceLineNo">566</span><a name="line.566"></a>
-<span class="sourceLineNo">567</span>    errors.print("Average load: " + status.getAverageLoad());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>    errors.print("Number of requests: " + status.getRequestCount());<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    errors.print("Number of regions: " + status.getRegionCount());<a name="line.569"></a>
-<span class="sourceLineNo">570</span><a name="line.570"></a>
-<span class="sourceLineNo">571</span>    List&lt;RegionState&gt; rits = status.getRegionStatesInTransition();<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    errors.print("Number of regions in transition: " + rits.size());<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    if (details) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>      for (RegionState state: rits) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>        errors.print("  " + state.toDescriptiveString());<a name="line.575"></a>
-<span class="sourceLineNo">576</span>      }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>    }<a name="line.577"></a>
-<span class="sourceLineNo">578</span><a name="line.578"></a>
-<span class="sourceLineNo">579</span>    // Determine what's deployed<a name="line.579"></a>
-<span class="sourceLineNo">580</span>    processRegionServers(regionServers);<a name="line.580"></a>
-<span class="sourceLineNo">581</span>  }<a name="line.581"></a>
-<span class="sourceLineNo">582</span><a name="line.582"></a>
-<span class="sourceLineNo">583</span>  /**<a name="line.583"></a>
-<span class="sourceLineNo">584</span>   * Clear the current state of hbck.<a name="line.584"></a>
-<span class="sourceLineNo">585</span>   */<a name="line.585"></a>
-<span class="sourceLineNo">586</span>  private void clearState() {<a name="line.586"></a>
-<span class="sourceLineNo">587</span>    // Make sure regionInfo is empty before starting<a name="line.587"></a>
-<span class="sourceLineNo">588</span>    fixes = 0;<a name="line.588"></a>
-<span class="sourceLineNo">589</span>    regionInfoMap.clear();<a name="line.589"></a>
-<span class="sourceLineNo">590</span>    emptyRegionInfoQualifiers.clear();<a name="line.590"></a>
-<span class="sourceLineNo">591</span>    tableStates.clear();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    errors.clear();<a name="line.592"></a>
-<span class="sourceLineNo">593</span>    tablesInfo.clear();<a name="line.593"></a>
-<span class="sourceLineNo">594</span>    orphanHdfsDirs.clear();<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    skippedRegions.clear();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>  }<a name="line.596"></a>
-<span class="sourceLineNo">597</span><a name="line.597"></a>
-<span class="sourceLineNo">598</span>  /**<a name="line.598"></a>
-<span class="sourceLineNo">599</span>   * This repair method analyzes hbase data in hdfs and repairs it to satisfy<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * the table integrity rules.  HBase doesn't need to be online for this<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * operation to work.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   */<a name="line.602"></a>
-<span class="sourceLineNo">603</span>  public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {<a name="line.603"></a>
-<span class="sourceLineNo">604</span>    // Initial pass to fix orphans.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    if (shouldCheckHdfs() &amp;&amp; (shouldFixHdfsOrphans() || shouldFixHdfsHoles()<a name="line.605"></a>
-<span class="sourceLineNo">606</span>        || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>      LOG.info("Loading regioninfos HDFS");<a name="line.607"></a>
-<span class="sourceLineNo">608</span>      // if nothing is happening this should always complete in two iterations.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>      int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);<a name="line.609"></a>
-<span class="sourceLineNo">610</span>      int curIter = 0;<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      do {<a name="line.611"></a>
-<span class="sourceLineNo">612</span>        clearState(); // clears hbck state and reset fixes to 0 and.<a name="line.612"></a>
-<span class="sourceLineNo">613</span>        // repair what's on HDFS<a name="line.613"></a>
-<span class="sourceLineNo">614</span>        restoreHdfsIntegrity();<a name="line.614"></a>
-<span class="sourceLineNo">615</span>        curIter++;// limit the number of iterations.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>      } while (fixes &gt; 0 &amp;&amp; curIter &lt;= maxIterations);<a name="line.616"></a>
-<span class="sourceLineNo">617</span><a name="line.617"></a>
-<span class="sourceLineNo">618</span>      // Repairs should be done in the first iteration and verification in the second.<a name="line.618"></a>
-<span class="sourceLineNo">619</span>      // If there are more than 2 passes, something funny has happened.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      if (curIter &gt; 2) {<a name="line.620"></a>
-<span class="sourceLineNo">621</span>        if (curIter == maxIterations) {<a name="line.621"></a>
-<span class="sourceLineNo">622</span>          LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "<a name="line.622"></a>
-<span class="sourceLineNo">623</span>              + "Tables integrity may not be fully repaired!");<a name="line.623"></a>
-<span class="sourceLineNo">624</span>        } else {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");<a name="line.625"></a>
-<span class="sourceLineNo">626</span>        }<a name="line.626"></a>
-<span class="sourceLineNo">627</span>      }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>    }<a name="line.628"></a>
-<span class="sourceLineNo">629</span>  }<a name="line.629"></a>
-<span class="sourceLineNo">630</span><a name="line.630"></a>
-<span class="sourceLineNo">631</span>  /**<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * This repair method requires the cluster to be online since it contacts<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * region servers and the masters.  It makes each region's state in HDFS, in<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * hbase:meta, and deployments consistent.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   *<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @return If &amp;gt; 0 , number of errors detected, if &amp;lt; 0 there was an unrecoverable<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   *     error.  If 0, we have a clean hbase.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public int onlineConsistencyRepair() throws IOException, KeeperException,<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    InterruptedException {<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    // get regions according to what is online on each RegionServer<a name="line.642"></a>
-<span class="sourceLineNo">643</span>    loadDeployedRegions();<a name="line.643"></a>
-<span class="sourceLineNo">644</span>    // check whether hbase:meta is deployed and online<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    recordMetaRegion();<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    // Check if hbase:meta is found only once and in the right place<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    if (!checkMetaRegion()) {<a name="line.647"></a>
-<span class="sourceLineNo">648</span>      String errorMsg = "hbase:meta table is not consistent. ";<a name="line.648"></a>
-<span class="sourceLineNo">649</span>      if (shouldFixAssignments()) {<a name="line.649"></a>
-<span class="sourceLineNo">650</span>        errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      } else {<a name="line.651"></a>
-<span class="sourceLineNo">652</span>        errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      }<a name="line.653"></a>
-<span class="sourceLineNo">654</span>      errors.reportError(errorMsg + " Exiting...");<a name="line.654"></a>
-<span class="sourceLineNo">655</span>      return -2;<a name="line.655"></a>
-<span class="sourceLineNo">656</span>    }<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    // Not going with further consistency check for tables when hbase:meta itself is not consistent.<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    LOG.info("Loading regionsinfo from the hbase:meta table");<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    boolean success = loadMetaEntries();<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    if (!success) return -1;<a name="line.660"></a>
-<span class="sourceLineNo">661</span><a name="line.661"></a>
-<span class="sourceLineNo">662</span>    // Empty cells in hbase:meta?<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    reportEmptyMetaCells();<a name="line.663"></a>
-<span class="sourceLineNo">664</span><a name="line.664"></a>
-<span class="sourceLineNo">665</span>    // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    if (shouldFixEmptyMetaCells()) {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>      fixEmptyMetaCells();<a name="line.667"></a>
-<span class="sourceLineNo">668</span>    }<a name="line.668"></a>
-<span class="sourceLineNo">669</span><a name="line.669"></a>
-<span class="sourceLineNo">670</span>    // get a list of all tables that have not changed recently.<a name="line.670"></a>
-<span class="sourceLineNo">671</span>    if (!checkMetaOnly) {<a name="line.671"></a>
-<span class="sourceLineNo">672</span>      reportTablesInFlux();<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Get disabled tables states<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    loadTableStates();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    // load regiondirs and regioninfos from HDFS<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    if (shouldCheckHdfs()) {<a name="line.679"></a>
-<span class="sourceLineNo">680</span>      LOG.info("Loading region directories from HDFS");<a name="line.680"></a>
-<span class="sourceLineNo">681</span>      loadHdfsRegionDirs();<a name="line.681"></a>
-<span class="sourceLineNo">682</span>      LOG.info("Loading region information from HDFS");<a name="line.682"></a>
-<span class="sourceLineNo">683</span>      loadHdfsRegionInfos();<a name="line.683"></a>
-<span class="sourceLineNo">684</span>    }<a name="line.684"></a>
-<span class="sourceLineNo">685</span><a name="line.685"></a>
-<span class="sourceLineNo">686</span>    // fix the orphan tables<a name="line.686"></a>
-<span class="sourceLineNo">687</span>    fixOrphanTables();<a name="line.687"></a>
-<span class="sourceLineNo">688</span><a name="line.688"></a>
-<span class="sourceLineNo">689</span>    LOG.info("Checking and fixing region consistency");<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    // Check and fix consistency<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    checkAndFixConsistency();<a name="line.691"></a>
+<span class="sourceLineNo">367</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.368"></a>
+<span class="sourceLineNo">369</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.369"></a>
+<span class="sourceLineNo">370</span>      getConf().getInt(<a name="line.370"></a>
+<span class="sourceLineNo">371</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.371"></a>
+<span class="sourceLineNo">372</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.372"></a>
+<span class="sourceLineNo">373</span>      getConf().getInt(<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.374"></a>
+<span class="sourceLineNo">375</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.375"></a>
+<span class="sourceLineNo">376</span>    zkw = createZooKeeperWatcher();<a name="line.376"></a>
+<span class="sourceLineNo">377</span>  }<a name="line.377"></a>
+<span class="sourceLineNo">378</span><a name="line.378"></a>
+<span class="sourceLineNo">379</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    RetryCounter retryCounter;<a name="line.380"></a>
+<span class="sourceLineNo">381</span><a name="line.381"></a>
+<span class="sourceLineNo">382</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>      this.retryCounter = retryCounter;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    }<a name="line.384"></a>
+<span class="sourceLineNo">385</span>    @Override<a name="line.385"></a>
+<span class="sourceLineNo">386</span>    public FSDataOutputStream call() throws IOException {<a name="line.386"></a>
+<span class="sourceLineNo">387</span>      try {<a name="line.387"></a>
+<span class="sourceLineNo">388</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.388"></a>
+<span class="sourceLineNo">389</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.389"></a>
+<span class="sourceLineNo">390</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        fs.mkdirs(tmpDir);<a name="line.392"></a>
+<span class="sourceLineNo">393</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.394"></a>
+<span class="sourceLineNo">395</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.395"></a>
+<span class="sourceLineNo">396</span>        out.flush();<a name="line.396"></a>
+<span class="sourceLineNo">397</span>        return out;<a name="line.397"></a>
+<span class="sourceLineNo">398</span>      } catch(RemoteException e) {<a name="line.398"></a>
+<span class="sourceLineNo">399</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.399"></a>
+<span class="sourceLineNo">400</span>          return null;<a name="line.400"></a>
+<span class="sourceLineNo">401</span>        } else {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>          throw e;<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        }<a name="line.403"></a>
+<span class="sourceLineNo">404</span>      }<a name="line.404"></a>
+<span class="sourceLineNo">405</span>    }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.407"></a>
+<span class="sourceLineNo">408</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.408"></a>
+<span class="sourceLineNo">409</span>        throws IOException {<a name="line.409"></a>
+<span class="sourceLineNo">410</span><a name="line.410"></a>
+<span class="sourceLineNo">411</span>      IOException exception = null;<a name="line.411"></a>
+<span class="sourceLineNo">412</span>      do {<a name="line.412"></a>
+<span class="sourceLineNo">413</span>        try {<a name="line.413"></a>
+<span class="sourceLineNo">414</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.414"></a>
+<span class="sourceLineNo">415</span>        } catch (IOException ioe) {<a name="line.415"></a>
+<span class="sourceLineNo">416</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.416"></a>
+<span class="sourceLineNo">417</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.417"></a>
+<span class="sourceLineNo">418</span>              + retryCounter.getMaxAttempts())

<TRUNCATED>

[30/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
index 392306b..2df47c7 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":9,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":9,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":9,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":9,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":9,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":9,"i109":10,"
 i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":9,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":9,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":9,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":9,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":9,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":9,"
 i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 <br>
 <pre>@InterfaceAudience.LimitedPrivate(value="Tools")
  @InterfaceStability.Evolving
-public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.203">HBaseFsck</a>
+public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.204">HBaseFsck</a>
 extends org.apache.hadoop.conf.Configured
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true" title="class or interface in java.io">Closeable</a></pre>
 <div class="block">HBaseFsck (hbck) is a tool for checking and repairing region consistency and
@@ -576,6 +576,10 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#TO_BE_LOADED">TO_BE_LOADED</a></span></code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
+<td class="colFirst"><code>private static <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt;</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#unsupportedOptionsInV2">unsupportedOptionsInV2</a></span></code>&nbsp;</td>
+</tr>
+<tr class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/zookeeper/ZKWatcher.html" title="class in org.apache.hadoop.hbase.zookeeper">ZKWatcher</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#zkw">zkw</a></span></code>&nbsp;</td>
 </tr>
@@ -898,406 +902,410 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 </tr>
 <tr id="i51" class="rowColor">
 <td class="colFirst"><code>private boolean</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isOptionsSupported-java.lang.String:A-">isOptionsSupported</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</code>&nbsp;</td>
+</tr>
+<tr id="i52" class="altColor">
+<td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isTableDisabled-org.apache.hadoop.hbase.TableName-">isTableDisabled</a></span>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;tableName)</code>
 <div class="block">Check if the specified region's table is disabled.</div>
 </td>
 </tr>
-<tr id="i52" class="altColor">
+<tr id="i53" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isTableIncluded-org.apache.hadoop.hbase.TableName-">isTableIncluded</a></span>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;table)</code>
 <div class="block">Only check/fix tables specified by the list,
  Empty list means all tables are included.</div>
 </td>
 </tr>
-<tr id="i53" class="rowColor">
+<tr id="i54" class="altColor">
 <td class="colFirst"><code>static byte[]</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#keyOnly-byte:A-">keyOnly</a></span>(byte[]&nbsp;b)</code>&nbsp;</td>
 </tr>
-<tr id="i54" class="altColor">
+<tr id="i55" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadDeployedRegions--">loadDeployedRegions</a></span>()</code>
 <div class="block">Get deployed regions according to the region servers.</div>
 </td>
 </tr>
-<tr id="i55" class="rowColor">
+<tr id="i56" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadHdfsRegionDirs--">loadHdfsRegionDirs</a></span>()</code>
 <div class="block">Scan HDFS for all regions, recording their information into
  regionInfoMap</div>
 </td>
 </tr>
-<tr id="i56" class="altColor">
+<tr id="i57" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadHdfsRegioninfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">loadHdfsRegioninfo</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi)</code>
 <div class="block">Read the .regioninfo file from the file system.</div>
 </td>
 </tr>
-<tr id="i57" class="rowColor">
+<tr id="i58" class="altColor">
 <td class="colFirst"><code>private <a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadHdfsRegionInfos--">loadHdfsRegionInfos</a></span>()</code>
 <div class="block">Populate hbi's from regionInfos loaded from file system.</div>
 </td>
 </tr>
-<tr id="i58" class="altColor">
+<tr id="i59" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadMetaEntries--">loadMetaEntries</a></span>()</code>
 <div class="block">Scan hbase:meta, adding all regions found to the regionInfo map.</div>
 </td>
 </tr>
-<tr id="i59" class="rowColor">
+<tr id="i60" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadTableInfosForTablesWithNoRegion--">loadTableInfosForTablesWithNoRegion</a></span>()</code>
 <div class="block">Loads table info's for tables that may not have been included, since there are no
  regions reported for the table, but table dir is there in hdfs</div>
 </td>
 </tr>
-<tr id="i60" class="altColor">
+<tr id="i61" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadTableStates--">loadTableStates</a></span>()</code>
 <div class="block">Load the list of disabled tables in ZK into local set.</div>
 </td>
 </tr>
-<tr id="i61" class="rowColor">
+<tr id="i62" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#logParallelMerge--">logParallelMerge</a></span>()</code>
 <div class="block">Log an appropriate message about whether or not overlapping merges are computed in parallel.</div>
 </td>
 </tr>
-<tr id="i62" class="altColor">
+<tr id="i63" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#main-java.lang.String:A-">main</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</code>
 <div class="block">Main program</div>
 </td>
 </tr>
-<tr id="i63" class="rowColor">
+<tr id="i64" class="altColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#mergeRegionDirs-org.apache.hadoop.fs.Path-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">mergeRegionDirs</a></span>(org.apache.hadoop.fs.Path&nbsp;targetRegionDir,
                <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;contained)</code>
 <div class="block">Merge hdfs data by moving from contained HbckInfo into targetRegionDir.</div>
 </td>
 </tr>
-<tr id="i64" class="altColor">
+<tr id="i65" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offline-byte:A-">offline</a></span>(byte[]&nbsp;regionName)</code>
 <div class="block">This backwards-compatibility wrapper for permanently offlining a region
  that should not be alive.</div>
 </td>
 </tr>
-<tr id="i65" class="rowColor">
+<tr id="i66" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offlineHdfsIntegrityRepair--">offlineHdfsIntegrityRepair</a></span>()</code>
 <div class="block">This repair method analyzes hbase data in hdfs and repairs it to satisfy
  the table integrity rules.</div>
 </td>
 </tr>
-<tr id="i66" class="altColor">
+<tr id="i67" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offlineHLinkFileRepair--">offlineHLinkFileRepair</a></span>()</code>
 <div class="block">Scan all the store file names to find any lingering HFileLink files,
  which refer to some none-exiting files.</div>
 </td>
 </tr>
-<tr id="i67" class="rowColor">
+<tr id="i68" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offlineReferenceFileRepair--">offlineReferenceFileRepair</a></span>()</code>
 <div class="block">Scan all the store file names to find any lingering reference files,
  which refer to some none-exiting files.</div>
 </td>
 </tr>
-<tr id="i68" class="altColor">
+<tr id="i69" class="rowColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#onlineConsistencyRepair--">onlineConsistencyRepair</a></span>()</code>
 <div class="block">This repair method requires the cluster to be online since it contacts
  region servers and the masters.</div>
 </td>
 </tr>
-<tr id="i69" class="rowColor">
+<tr id="i70" class="altColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#onlineHbck--">onlineHbck</a></span>()</code>
 <div class="block">Contacts the master and prints out cluster-wide information</div>
 </td>
 </tr>
-<tr id="i70" class="altColor">
+<tr id="i71" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#preCheckPermission--">preCheckPermission</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i71" class="rowColor">
+<tr id="i72" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#printTableSummary-java.util.SortedMap-">printTableSummary</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;&nbsp;tablesInfo)</code>
 <div class="block">Prints summary of all tables found on the system.</div>
 </td>
 </tr>
-<tr id="i72" class="altColor">
+<tr id="i73" class="rowColor">
 <td class="colFirst"><code>protected <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#printUsageAndExit--">printUsageAndExit</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i73" class="rowColor">
+<tr id="i74" class="altColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#processRegionServers-java.util.Collection-">processRegionServers</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt;&nbsp;regionServerList)</code>
 <div class="block">Contacts each regionserver and fetches metadata about regions.</div>
 </td>
 </tr>
-<tr id="i74" class="altColor">
+<tr id="i75" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#rebuildMeta-boolean-">rebuildMeta</a></span>(boolean&nbsp;fix)</code>
 <div class="block">Rebuilds meta from information in hdfs/fs.</div>
 </td>
 </tr>
-<tr id="i75" class="rowColor">
+<tr id="i76" class="altColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#recordMetaRegion--">recordMetaRegion</a></span>()</code>
 <div class="block">Record the location of the hbase:meta region as found in ZooKeeper.</div>
 </td>
 </tr>
-<tr id="i76" class="altColor">
+<tr id="i77" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#removeHBCKMetaRecoveryWALDir-java.lang.String-">removeHBCKMetaRecoveryWALDir</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;walFactoryId)</code>
 <div class="block">Removes the empty Meta recovery WAL directory.</div>
 </td>
 </tr>
-<tr id="i77" class="rowColor">
+<tr id="i78" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#reportEmptyMetaCells--">reportEmptyMetaCells</a></span>()</code>
 <div class="block">TODO -- need to add tests for this.</div>
 </td>
 </tr>
-<tr id="i78" class="altColor">
+<tr id="i79" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#reportTablesInFlux--">reportTablesInFlux</a></span>()</code>
 <div class="block">TODO -- need to add tests for this.</div>
 </td>
 </tr>
-<tr id="i79" class="rowColor">
+<tr id="i80" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#resetSplitParent-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">resetSplitParent</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>
 <div class="block">Reset the split parent region info in meta table</div>
 </td>
 </tr>
-<tr id="i80" class="altColor">
+<tr id="i81" class="rowColor">
 <td class="colFirst"><code>private int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#restoreHdfsIntegrity--">restoreHdfsIntegrity</a></span>()</code>
 <div class="block">This method determines if there are table integrity errors in HDFS.</div>
 </td>
 </tr>
-<tr id="i81" class="rowColor">
+<tr id="i82" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setCheckHdfs-boolean-">setCheckHdfs</a></span>(boolean&nbsp;checking)</code>&nbsp;</td>
 </tr>
-<tr id="i82" class="altColor">
+<tr id="i83" class="rowColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setCheckMetaOnly--">setCheckMetaOnly</a></span>()</code>
 <div class="block">Set hbase:meta check mode.</div>
 </td>
 </tr>
-<tr id="i83" class="rowColor">
+<tr id="i84" class="altColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setDisplayFullReport--">setDisplayFullReport</a></span>()</code>
 <div class="block">Display the full report from fsck.</div>
 </td>
 </tr>
-<tr id="i84" class="altColor">
+<tr id="i85" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixAssignments-boolean-">setFixAssignments</a></span>(boolean&nbsp;shouldFix)</code>
 <div class="block">Fix inconsistencies found by fsck.</div>
 </td>
 </tr>
-<tr id="i85" class="rowColor">
+<tr id="i86" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixEmptyMetaCells-boolean-">setFixEmptyMetaCells</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i86" class="altColor">
+<tr id="i87" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHdfsHoles-boolean-">setFixHdfsHoles</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i87" class="rowColor">
+<tr id="i88" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHdfsOrphans-boolean-">setFixHdfsOrphans</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i88" class="altColor">
+<tr id="i89" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHdfsOverlaps-boolean-">setFixHdfsOverlaps</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i89" class="rowColor">
+<tr id="i90" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHFileLinks-boolean-">setFixHFileLinks</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i90" class="altColor">
+<tr id="i91" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixMeta-boolean-">setFixMeta</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i91" class="rowColor">
+<tr id="i92" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixReferenceFiles-boolean-">setFixReferenceFiles</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i92" class="altColor">
+<tr id="i93" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixReplication-boolean-">setFixReplication</a></span>(boolean&nbsp;shouldFix)</code>
 <div class="block">Set replication fix mode.</div>
 </td>
 </tr>
-<tr id="i93" class="rowColor">
+<tr id="i94" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixSplitParents-boolean-">setFixSplitParents</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i94" class="altColor">
+<tr id="i95" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixTableOrphans-boolean-">setFixTableOrphans</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i95" class="rowColor">
+<tr id="i96" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixVersionFile-boolean-">setFixVersionFile</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i96" class="altColor">
+<tr id="i97" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setForceExclusive--">setForceExclusive</a></span>()</code>
 <div class="block">Set exclusive mode.</div>
 </td>
 </tr>
-<tr id="i97" class="rowColor">
+<tr id="i98" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setHFileCorruptionChecker-org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker-">setHFileCorruptionChecker</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a>&nbsp;hfcc)</code>&nbsp;</td>
 </tr>
-<tr id="i98" class="altColor">
+<tr id="i99" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setIgnorePreCheckPermission-boolean-">setIgnorePreCheckPermission</a></span>(boolean&nbsp;ignorePreCheckPermission)</code>&nbsp;</td>
 </tr>
-<tr id="i99" class="rowColor">
+<tr id="i100" class="altColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setMasterInMaintenanceMode--">setMasterInMaintenanceMode</a></span>()</code>
 <div class="block">This method maintains an ephemeral znode.</div>
 </td>
 </tr>
-<tr id="i100" class="altColor">
+<tr id="i101" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setMaxMerge-int-">setMaxMerge</a></span>(int&nbsp;mm)</code>&nbsp;</td>
 </tr>
-<tr id="i101" class="rowColor">
+<tr id="i102" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setMaxOverlapsToSideline-int-">setMaxOverlapsToSideline</a></span>(int&nbsp;mo)</code>&nbsp;</td>
 </tr>
-<tr id="i102" class="altColor">
+<tr id="i103" class="rowColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setRegionBoundariesCheck--">setRegionBoundariesCheck</a></span>()</code>
 <div class="block">Set region boundaries check mode.</div>
 </td>
 </tr>
-<tr id="i103" class="rowColor">
+<tr id="i104" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setRemoveParents-boolean-">setRemoveParents</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i104" class="altColor">
+<tr id="i105" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setRetCode-int-">setRetCode</a></span>(int&nbsp;code)</code>&nbsp;</td>
 </tr>
-<tr id="i105" class="rowColor">
+<tr id="i106" class="altColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setShouldRerun--">setShouldRerun</a></span>()</code>
 <div class="block">Check if we should rerun fsck again.</div>
 </td>
 </tr>
-<tr id="i106" class="altColor">
+<tr id="i107" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setSidelineBigOverlaps-boolean-">setSidelineBigOverlaps</a></span>(boolean&nbsp;sbo)</code>&nbsp;</td>
 </tr>
-<tr id="i107" class="rowColor">
+<tr id="i108" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setSidelineDir-java.lang.String-">setSidelineDir</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;sidelineDir)</code>&nbsp;</td>
 </tr>
-<tr id="i108" class="altColor">
+<tr id="i109" class="rowColor">
 <td class="colFirst"><code>(package private) static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setSummary--">setSummary</a></span>()</code>
 <div class="block">Set summary mode.</div>
 </td>
 </tr>
-<tr id="i109" class="rowColor">
+<tr id="i110" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setTimeLag-long-">setTimeLag</a></span>(long&nbsp;seconds)</code>
 <div class="block">We are interested in only those tables that have not changed their state in
  hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag</div>
 </td>
 </tr>
-<tr id="i110" class="altColor">
+<tr id="i111" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldCheckHdfs--">shouldCheckHdfs</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i111" class="rowColor">
+<tr id="i112" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixAssignments--">shouldFixAssignments</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i112" class="altColor">
+<tr id="i113" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixEmptyMetaCells--">shouldFixEmptyMetaCells</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i113" class="rowColor">
+<tr id="i114" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHdfsHoles--">shouldFixHdfsHoles</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i114" class="altColor">
+<tr id="i115" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHdfsOrphans--">shouldFixHdfsOrphans</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i115" class="rowColor">
+<tr id="i116" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHdfsOverlaps--">shouldFixHdfsOverlaps</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i116" class="altColor">
+<tr id="i117" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHFileLinks--">shouldFixHFileLinks</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i117" class="rowColor">
+<tr id="i118" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixMeta--">shouldFixMeta</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i118" class="altColor">
+<tr id="i119" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixReferenceFiles--">shouldFixReferenceFiles</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i119" class="rowColor">
+<tr id="i120" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixSplitParents--">shouldFixSplitParents</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i120" class="altColor">
+<tr id="i121" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixTableOrphans--">shouldFixTableOrphans</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i121" class="rowColor">
+<tr id="i122" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixVersionFile--">shouldFixVersionFile</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i122" class="altColor">
+<tr id="i123" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldIgnorePreCheckPermission--">shouldIgnorePreCheckPermission</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i123" class="rowColor">
+<tr id="i124" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldRemoveParents--">shouldRemoveParents</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i124" class="altColor">
+<tr id="i125" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldRerun--">shouldRerun</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i125" class="rowColor">
+<tr id="i126" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldSidelineBigOverlaps--">shouldSidelineBigOverlaps</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i126" class="altColor">
+<tr id="i127" class="rowColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineFile-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.fs.Path-org.apache.hadoop.fs.Path-">sidelineFile</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
             org.apache.hadoop.fs.Path&nbsp;hbaseRoot,
             org.apache.hadoop.fs.Path&nbsp;path)</code>&nbsp;</td>
 </tr>
-<tr id="i127" class="rowColor">
+<tr id="i128" class="altColor">
 <td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineOldMeta--">sidelineOldMeta</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i128" class="altColor">
+<tr id="i129" class="rowColor">
 <td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineRegionDir-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">sidelineRegionDir</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>
 <div class="block">Sideline a region dir (instead of deleting it)</div>
 </td>
 </tr>
-<tr id="i129" class="rowColor">
+<tr id="i130" class="altColor">
 <td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineRegionDir-org.apache.hadoop.fs.FileSystem-java.lang.String-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">sidelineRegionDir</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;parentDir,
@@ -1305,7 +1313,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <div class="block">Sideline a region dir (instead of deleting it)</div>
 </td>
 </tr>
-<tr id="i130" class="altColor">
+<tr id="i131" class="rowColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineTable-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.hbase.TableName-org.apache.hadoop.fs.Path-org.apache.hadoop.fs.Path-">sidelineTable</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
              <a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;tableName,
@@ -1314,30 +1322,30 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <div class="block">Side line an entire table.</div>
 </td>
 </tr>
-<tr id="i131" class="rowColor">
+<tr id="i132" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#suggestFixes-java.util.SortedMap-">suggestFixes</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;&nbsp;tablesInfo)</code>
 <div class="block">Suggest fixes for each table</div>
 </td>
 </tr>
-<tr id="i132" class="altColor">
+<tr id="i133" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#tryAssignmentRepair-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-java.lang.String-">tryAssignmentRepair</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi,
                    <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;msg)</code>&nbsp;</td>
 </tr>
-<tr id="i133" class="rowColor">
+<tr id="i134" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#unassignMetaReplica-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">unassignMetaReplica</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>&nbsp;</td>
 </tr>
-<tr id="i134" class="altColor">
+<tr id="i135" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#undeployRegions-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">undeployRegions</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>&nbsp;</td>
 </tr>
-<tr id="i135" class="rowColor">
+<tr id="i136" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#undeployRegionsForHbi-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">undeployRegionsForHbi</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>&nbsp;</td>
 </tr>
-<tr id="i136" class="altColor">
+<tr id="i137" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#unlockHbck--">unlockHbck</a></span>()</code>&nbsp;</td>
 </tr>
@@ -1376,7 +1384,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_TIME_LAG</h4>
-<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.204">DEFAULT_TIME_LAG</a></pre>
+<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.205">DEFAULT_TIME_LAG</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_TIME_LAG">Constant Field Values</a></dd>
@@ -1389,7 +1397,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_SLEEP_BEFORE_RERUN</h4>
-<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.205">DEFAULT_SLEEP_BEFORE_RERUN</a></pre>
+<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.206">DEFAULT_SLEEP_BEFORE_RERUN</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_SLEEP_BEFORE_RERUN">Constant Field Values</a></dd>
@@ -1402,7 +1410,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>MAX_NUM_THREADS</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.206">MAX_NUM_THREADS</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.207">MAX_NUM_THREADS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.MAX_NUM_THREADS">Constant Field Values</a></dd>
@@ -1415,7 +1423,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>rsSupportsOffline</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.207">rsSupportsOffline</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.208">rsSupportsOffline</a></pre>
 </li>
 </ul>
 <a name="DEFAULT_OVERLAPS_TO_SIDELINE">
@@ -1424,7 +1432,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_OVERLAPS_TO_SIDELINE</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.208">DEFAULT_OVERLAPS_TO_SIDELINE</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.209">DEFAULT_OVERLAPS_TO_SIDELINE</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_OVERLAPS_TO_SIDELINE">Constant Field Values</a></dd>
@@ -1437,7 +1445,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_MAX_MERGE</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.209">DEFAULT_MAX_MERGE</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.210">DEFAULT_MAX_MERGE</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_MAX_MERGE">Constant Field Values</a></dd>
@@ -1450,7 +1458,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>TO_BE_LOADED</h4>
-<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.210">TO_BE_LOADED</a></pre>
+<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.211">TO_BE_LOADED</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.TO_BE_LOADED">Constant Field Values</a></dd>
@@ -1463,7 +1471,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>HBCK_LOCK_FILE</h4>
-<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.211">HBCK_LOCK_FILE</a></pre>
+<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.212">HBCK_LOCK_FILE</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.HBCK_LOCK_FILE">Constant Field Values</a></dd>
@@ -1476,7 +1484,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_MAX_LOCK_FILE_ATTEMPTS</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.212">DEFAULT_MAX_LOCK_FILE_ATTEMPTS</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.213">DEFAULT_MAX_LOCK_FILE_ATTEMPTS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_MAX_LOCK_FILE_ATTEMPTS">Constant Field Values</a></dd>
@@ -1489,7 +1497,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.213">DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.214">DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL">Constant Field Values</a></dd>
@@ -1502,7 +1510,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.214">DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.215">DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME">Constant Field Values</a></dd>
@@ -1515,7 +1523,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_WAIT_FOR_LOCK_TIMEOUT</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.219">DEFAULT_WAIT_FOR_LOCK_TIMEOUT</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.220">DEFAULT_WAIT_FOR_LOCK_TIMEOUT</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_WAIT_FOR_LOCK_TIMEOUT">Constant Field Values</a></dd>
@@ -1528,7 +1536,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.220">DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.221">DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS">Constant Field Values</a></dd>
@@ -1541,7 +1549,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.221">DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.222">DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL">Constant Field Values</a></dd>
@@ -1554,7 +1562,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.222">DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.223">DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME">Constant Field Values</a></dd>
@@ -1567,7 +1575,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>LOG</h4>
-<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.227">LOG</a></pre>
+<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.228">LOG</a></pre>
 <div class="block">Internal resources</div>
 </li>
 </ul>
@@ -1577,7 +1585,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>status</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ClusterMetrics.html" title="interface in org.apache.hadoop.hbase">ClusterMetrics</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.228">status</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ClusterMetrics.html" title="interface in org.apache.hadoop.hbase">ClusterMetrics</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.229">status</a></pre>
 </li>
 </ul>
 <a name="connection">
@@ -1586,7 +1594,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>connection</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.229">connection</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.230">connection</a></pre>
 </li>
 </ul>
 <a name="admin">
@@ -1595,7 +1603,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>admin</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Admin.html" title="interface in org.apache.hadoop.hbase.client">Admin</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.230">admin</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Admin.html" title="interface in org.apache.hadoop.hbase.client">Admin</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.231">admin</a></pre>
 </li>
 </ul>
 <a name="meta">
@@ -1604,7 +1612,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>meta</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Table.html" title="interface in org.apache.hadoop.hbase.client">Table</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.231">meta</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Table.html" title="interface in org.apache.hadoop.hbase.client">Table</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.232">meta</a></pre>
 </li>
 </ul>
 <a name="executor">
@@ -1613,7 +1621,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>executor</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true" title="class or interface in java.util.concurrent">ExecutorService</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.233">executor</a></pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true" title="class or interface in java.util.concurrent">ExecutorService</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.234">executor</a></pre>
 </li>
 </ul>
 <a name="startMillis">
@@ -1622,7 +1630,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>startMillis</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.234">startMillis</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.235">startMillis</a></pre>
 </li>
 </ul>
 <a name="hfcc">
@@ -1631,7 +1639,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>hfcc</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.235">hfcc</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.236">hfcc</a></pre>
 </li>
 </ul>
 <a name="retcode">
@@ -1640,7 +1648,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>retcode</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.236">retcode</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.237">retcode</a></pre>
 </li>
 </ul>
 <a name="HBCK_LOCK_PATH">
@@ -1649,7 +1657,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>HBCK_LOCK_PATH</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.237">HBCK_LOCK_PATH</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.238">HBCK_LOCK_PATH</a></pre>
 </li>
 </ul>
 <a name="hbckOutFd">
@@ -1658,7 +1666,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>hbckOutFd</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.238">hbckOutFd</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.239">hbckOutFd</a></pre>
 </li>
 </ul>
 <a name="hbckLockCleanup">
@@ -1667,7 +1675,16 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>hbckLockCleanup</h4>
-<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicBoolean</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.242">hbckLockCleanup</a></pre>
+<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicBoolean</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.243">hbckLockCleanup</a></pre>
+</li>
+</ul>
+<a name="unsupportedOptionsInV2">
+<!--   -->
+</a>
+<ul class="blockList">
+<li class="blockList">
+<h4>unsupportedOptionsInV2</h4>
+<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.246">unsupportedOptionsInV2</a></pre>
 </li>
 </ul>
 <a name="details">
@@ -1676,7 +1693,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>details</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.247">details</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.254">details</a></pre>
 <div class="block">Options</div>
 </li>
 </ul>
@@ -1686,7 +1703,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>timelag</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.248">timelag</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.255">timelag</a></pre>
 </li>
 </ul>
 <a name="forceExclusive">
@@ -1695,7 +1712,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>forceExclusive</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.249">forceExclusive</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.256">forceExclusive</a></pre>
 </li>
 </ul>
 <a name="fixAssignments">
@@ -1704,7 +1721,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixAssignments</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.250">fixAssignments</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.257">fixAssignments</a></pre>
 </li>
 </ul>
 <a name="fixMeta">
@@ -1713,7 +1730,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixMeta</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.251">fixMeta</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.258">fixMeta</a></pre>
 </li>
 </ul>
 <a name="checkHdfs">
@@ -1722,7 +1739,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>checkHdfs</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.252">checkHdfs</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.259">checkHdfs</a></pre>
 </li>
 </ul>
 <a name="fixHdfsHoles">
@@ -1731,7 +1748,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixHdfsHoles</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.253">fixHdfsHoles</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.260">fixHdfsHoles</a></pre>
 </li>
 </ul>
 <a name="fixHdfsOverlaps">
@@ -1740,7 +1757,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixHdfsOverlaps</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.254">fixHdfsOverlaps</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.261">fixHdfsOverlaps</a></pre>
 </li>
 </ul>
 <a name="fixHdfsOrphans">
@@ -1749,7 +1766,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixHdfsOrphans</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.255">fixHdfsOrphans</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.262">fixHdfsOrphans</a></pre>
 </li>
 </ul>
 <a name="fixTableOrphans">
@@ -1758,7 +1775,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixTableOrphans</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.256">fixTableOrphans</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.263">fixTableOrphans</a></pre>
 </li>
 </ul>
 <a name="fixVersionFile">
@@ -1767,7 +1784,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixVersionFile</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.257">fixVersionFile</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.264">fixVersionFile</a></pre>
 </li>
 </ul>
 <a name="fixSplitParents">
@@ -1776,7 +1793,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixSplitParents</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.258">fixSplitParents</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.265">fixSplitParents</a></pre>
 </li>
 </ul>
 <a name="removeParents">
@@ -1785,7 +1802,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>removeParents</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.259">removeParents</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.266">removeParents</a></pre>
 </li>
 </ul>
 <a name="fixReferenceFiles">
@@ -1794,7 +1811,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixReferenceFiles</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.260">fixReferenceFiles</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.267">fixReferenceFiles</a></pre>
 </li>
 </ul>
 <a name="fixHFileLinks">
@@ -1803,7 +1820,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixHFileLinks</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.261">fixHFileLinks</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.268">fixHFileLinks</a></pre>
 </li>
 </ul>
 <a name="fixEmptyMetaCells">
@@ -1812,7 +1829,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixEmptyMetaCells</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.262">fixEmptyMetaCells</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.269">fixEmptyMetaCells</a></pre>
 </li>
 </ul>
 <a name="fixReplication">
@@ -1821,7 +1838,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixReplication</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.263">fixReplication</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.270">fixReplication</a></pre>
 </li>
 </ul>
 <a name="fixAny">
@@ -1830,7 +1847,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixAny</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.264">fixAny</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.271">fixAny</a></pre>
 </li>
 </ul>
 <a name="tablesIncluded">
@@ -1839,7 +1856,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>tablesIncluded</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.268">tablesIncluded</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.275">tablesIncluded</a></pre>
 </li>
 </ul>
 <a name="maxMerge">
@@ -1848,7 +1865,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>maxMerge</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.269">maxMerge</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.276">maxMerge</a></pre>
 </li>
 </ul>
 <a name="maxOverlapsToSideline">
@@ -1857,7 +1874,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>maxOverlapsToSideline</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.271">maxOverlapsToSideline</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.278">maxOverlapsToSideline</a></pre>
 </li>
 </ul>
 <a name="sidelineBigOverlaps">
@@ -1866,7 +1883,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>sidelineBigOverlaps</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.272">sidelineBigOverlaps</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.279">sidelineBigOverlaps</a></pre>
 </li>
 </ul>
 <a name="sidelineDir">
@@ -1875,7 +1892,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>sidelineDir</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.273">sidelineDir</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.280">sidelineDir</a></pre>
 </li>
 </ul>
 <a name="rerun">
@@ -1884,7 +1901,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>rerun</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.275">rerun</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.282">rerun</a></pre>
 </li>
 </ul>
 <a name="summary">
@@ -1893,7 +1910,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>summary</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.276">summary</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.283">summary</a></pre>
 </li>
 </ul>
 <a name="checkMetaOnly">
@@ -1902,7 +1919,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>checkMetaOnly</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.277">checkMetaOnly</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.284">checkMetaOnly</a></pre>
 </li>
 </ul>
 <a name="checkRegionBoundaries">
@@ -1911,7 +1928,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>checkRegionBoundaries</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.278">checkRegionBoundaries</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.285">checkRegionBoundaries</a></pre>
 </li>
 </ul>
 <a name="ignorePreCheckPermission">
@@ -1920,7 +1937,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>ignorePreCheckPermission</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.279">ignorePreCheckPermission</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.286">ignorePreCheckPermission</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -1929,7 +1946,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.284">errors</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.291">errors</a></pre>
 <div class="block">State</div>
 </li>
 </ul>
@@ -1939,7 +1956,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixes</h4>
-<pre>int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.285">fixes</a></pre>
+<pre>int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.292">fixes</a></pre>
 </li>
 </ul>
 <a name="regionInfoMap">
@@ -1948,7 +1965,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>regionInfoMap</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/TreeMap.html?is-external=true" title="class or interface in java.util">TreeMap</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.292">regionInfoMap</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/TreeMap.html?is-external=true" title="class or interface in java.util">TreeMap</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.299">regionInfoMap</a></pre>
 <div class="block">This map contains the state of all hbck items.  It maps from encoded region
  name to HbckInfo structure.  The information contained in HbckInfo is used
  to detect and correct consistency (hdfs/meta/deployment) problems.</div>
@@ -1960,7 +1977,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>emptyRegionInfoQualifiers</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/Result.html" title="class in org.apache.hadoop.hbase.client">Result</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.294">emptyRegionInfoQualifiers</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/Result.html" title="class in org.apache.hadoop.hbase.client">Result</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.301">emptyRegionInfoQualifiers</a></pre>
 </li>
 </ul>
 <a name="tablesInfo">
@@ -1969,7 +1986,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>tablesInfo</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.306">tablesInfo</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.313">tablesInfo</a></pre>
 <div class="block">This map from Tablename -> TableInfo contains the structures necessary to
  detect table consistency problems (holes, dupes, overlaps).  It is sorted
  to prevent dupes.
@@ -1986,7 +2003,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>orphanHdfsDirs</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.311">orphanHdfsDirs</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.318">orphanHdfsDirs</a></pre>
 <div class="block">When initially looking at HDFS, we attempt to find any orphaned data.</div>
 </li>
 </ul>
@@ -1996,7 +2013,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>orphanTableDirs</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt;&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.313">orphanTableDirs</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt;&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.320">orphanTableDirs</a></pre>
 </li>
 </ul>
 <a name="tableStates">
@@ -2005,7 +2022,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>tableStates</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/client/TableState.html" title="class in org.apache.hadoop.hbase.client">TableState</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.314">tableStates</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/client/TableState.html" title="class in org.apache.hadoop.hbase.client">TableState</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.321">tableStates</a></pre>
 </li>
 </ul>
 <a name="lockFileRetryCounterFactory">
@@ -2014,7 +2031,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>lockFileRetryCounterFactory</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.315">lockFileRetryCounterFactory</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.322">lockFileRetryCounterFactory</a></pre>
 </li>
 </ul>
 <a name="createZNodeRetryCounterFactory">
@@ -2023,7 +2040,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>createZNodeRetryCounterFactory</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.316">createZNodeRetryCounterFactory</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.323">createZNodeRetryCounterFactory</a></pre>
 </li>
 </ul>
 <a name="skippedRegions">
@@ -2032,7 +2049,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>skippedRegions</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt;&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.318">skippedRegions</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interfa

<TRUNCATED>

[24/43] hbase-site git commit: Published site at 556b22374423ff087c0583d02ae4298d4d4f2e6b.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
index 9b4f7a7..411bb17 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
@@ -133,100 +133,99 @@
 <span class="sourceLineNo">125</span>   */<a name="line.125"></a>
 <span class="sourceLineNo">126</span>  public List&lt;KeyValueScanner&gt; getScanners(long readPt) throws IOException {<a name="line.126"></a>
 <span class="sourceLineNo">127</span>    List&lt;KeyValueScanner&gt; list = new ArrayList&lt;&gt;();<a name="line.127"></a>
-<span class="sourceLineNo">128</span>    long order = snapshot.getNumOfSegments();<a name="line.128"></a>
-<span class="sourceLineNo">129</span>    order = addToScanners(active, readPt, order, list);<a name="line.129"></a>
-<span class="sourceLineNo">130</span>    addToScanners(snapshot.getAllSegments(), readPt, order, list);<a name="line.130"></a>
-<span class="sourceLineNo">131</span>    return list;<a name="line.131"></a>
-<span class="sourceLineNo">132</span>  }<a name="line.132"></a>
-<span class="sourceLineNo">133</span><a name="line.133"></a>
-<span class="sourceLineNo">134</span>  @Override<a name="line.134"></a>
-<span class="sourceLineNo">135</span>  protected List&lt;Segment&gt; getSegments() throws IOException {<a name="line.135"></a>
-<span class="sourceLineNo">136</span>    List&lt;Segment&gt; list = new ArrayList&lt;&gt;(2);<a name="line.136"></a>
-<span class="sourceLineNo">137</span>    list.add(this.active);<a name="line.137"></a>
-<span class="sourceLineNo">138</span>    list.add(this.snapshot);<a name="line.138"></a>
-<span class="sourceLineNo">139</span>    return list;<a name="line.139"></a>
-<span class="sourceLineNo">140</span>  }<a name="line.140"></a>
-<span class="sourceLineNo">141</span><a name="line.141"></a>
-<span class="sourceLineNo">142</span>  /**<a name="line.142"></a>
-<span class="sourceLineNo">143</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.143"></a>
-<span class="sourceLineNo">144</span>   * first.<a name="line.144"></a>
-<span class="sourceLineNo">145</span>   * @return Next row or null if none found.<a name="line.145"></a>
-<span class="sourceLineNo">146</span>   */<a name="line.146"></a>
-<span class="sourceLineNo">147</span>  Cell getNextRow(final Cell cell) {<a name="line.147"></a>
-<span class="sourceLineNo">148</span>    return getLowest(<a name="line.148"></a>
-<span class="sourceLineNo">149</span>        getNextRow(cell, this.active.getCellSet()),<a name="line.149"></a>
-<span class="sourceLineNo">150</span>        getNextRow(cell, this.snapshot.getCellSet()));<a name="line.150"></a>
-<span class="sourceLineNo">151</span>  }<a name="line.151"></a>
-<span class="sourceLineNo">152</span><a name="line.152"></a>
-<span class="sourceLineNo">153</span>  @Override public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) {<a name="line.153"></a>
-<span class="sourceLineNo">154</span>  }<a name="line.154"></a>
-<span class="sourceLineNo">155</span><a name="line.155"></a>
-<span class="sourceLineNo">156</span>  @Override<a name="line.156"></a>
-<span class="sourceLineNo">157</span>  public MemStoreSize size() {<a name="line.157"></a>
-<span class="sourceLineNo">158</span>    return new MemStoreSize(active.getMemStoreSize());<a name="line.158"></a>
-<span class="sourceLineNo">159</span>  }<a name="line.159"></a>
-<span class="sourceLineNo">160</span><a name="line.160"></a>
-<span class="sourceLineNo">161</span>  /**<a name="line.161"></a>
-<span class="sourceLineNo">162</span>   * Check whether anything need to be done based on the current active set size<a name="line.162"></a>
-<span class="sourceLineNo">163</span>   * Nothing need to be done for the DefaultMemStore<a name="line.163"></a>
-<span class="sourceLineNo">164</span>   */<a name="line.164"></a>
-<span class="sourceLineNo">165</span>  @Override<a name="line.165"></a>
-<span class="sourceLineNo">166</span>  protected void checkActiveSize() {<a name="line.166"></a>
-<span class="sourceLineNo">167</span>    return;<a name="line.167"></a>
-<span class="sourceLineNo">168</span>  }<a name="line.168"></a>
-<span class="sourceLineNo">169</span><a name="line.169"></a>
-<span class="sourceLineNo">170</span>  @Override<a name="line.170"></a>
-<span class="sourceLineNo">171</span>  public long preFlushSeqIDEstimation() {<a name="line.171"></a>
-<span class="sourceLineNo">172</span>    return HConstants.NO_SEQNUM;<a name="line.172"></a>
-<span class="sourceLineNo">173</span>  }<a name="line.173"></a>
-<span class="sourceLineNo">174</span><a name="line.174"></a>
-<span class="sourceLineNo">175</span>  @Override public boolean isSloppy() {<a name="line.175"></a>
-<span class="sourceLineNo">176</span>    return false;<a name="line.176"></a>
-<span class="sourceLineNo">177</span>  }<a name="line.177"></a>
-<span class="sourceLineNo">178</span><a name="line.178"></a>
-<span class="sourceLineNo">179</span>  /**<a name="line.179"></a>
-<span class="sourceLineNo">180</span>   * Code to help figure if our approximation of object heap sizes is close<a name="line.180"></a>
-<span class="sourceLineNo">181</span>   * enough.  See hbase-900.  Fills memstores then waits so user can heap<a name="line.181"></a>
-<span class="sourceLineNo">182</span>   * dump and bring up resultant hprof in something like jprofiler which<a name="line.182"></a>
-<span class="sourceLineNo">183</span>   * allows you get 'deep size' on objects.<a name="line.183"></a>
-<span class="sourceLineNo">184</span>   * @param args main args<a name="line.184"></a>
-<span class="sourceLineNo">185</span>   */<a name="line.185"></a>
-<span class="sourceLineNo">186</span>  public static void main(String [] args) {<a name="line.186"></a>
-<span class="sourceLineNo">187</span>    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();<a name="line.187"></a>
-<span class="sourceLineNo">188</span>    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +<a name="line.188"></a>
-<span class="sourceLineNo">189</span>      runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());<a name="line.189"></a>
-<span class="sourceLineNo">190</span>    LOG.info("vmInputArguments=" + runtime.getInputArguments());<a name="line.190"></a>
-<span class="sourceLineNo">191</span>    DefaultMemStore memstore1 = new DefaultMemStore();<a name="line.191"></a>
-<span class="sourceLineNo">192</span>    // TODO: x32 vs x64<a name="line.192"></a>
-<span class="sourceLineNo">193</span>    final int count = 10000;<a name="line.193"></a>
-<span class="sourceLineNo">194</span>    byte [] fam = Bytes.toBytes("col");<a name="line.194"></a>
-<span class="sourceLineNo">195</span>    byte [] qf = Bytes.toBytes("umn");<a name="line.195"></a>
-<span class="sourceLineNo">196</span>    byte [] empty = new byte[0];<a name="line.196"></a>
-<span class="sourceLineNo">197</span>    MemStoreSizing memstoreSizing = new MemStoreSizing();<a name="line.197"></a>
-<span class="sourceLineNo">198</span>    for (int i = 0; i &lt; count; i++) {<a name="line.198"></a>
-<span class="sourceLineNo">199</span>      // Give each its own ts<a name="line.199"></a>
-<span class="sourceLineNo">200</span>      memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSizing);<a name="line.200"></a>
-<span class="sourceLineNo">201</span>    }<a name="line.201"></a>
-<span class="sourceLineNo">202</span>    LOG.info("memstore1 estimated size="<a name="line.202"></a>
-<span class="sourceLineNo">203</span>        + (memstoreSizing.getDataSize() + memstoreSizing.getHeapSize()));<a name="line.203"></a>
-<span class="sourceLineNo">204</span>    for (int i = 0; i &lt; count; i++) {<a name="line.204"></a>
-<span class="sourceLineNo">205</span>      memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSizing);<a name="line.205"></a>
-<span class="sourceLineNo">206</span>    }<a name="line.206"></a>
-<span class="sourceLineNo">207</span>    LOG.info("memstore1 estimated size (2nd loading of same data)="<a name="line.207"></a>
-<span class="sourceLineNo">208</span>        + (memstoreSizing.getDataSize() + memstoreSizing.getHeapSize()));<a name="line.208"></a>
-<span class="sourceLineNo">209</span>    // Make a variably sized memstore.<a name="line.209"></a>
-<span class="sourceLineNo">210</span>    DefaultMemStore memstore2 = new DefaultMemStore();<a name="line.210"></a>
-<span class="sourceLineNo">211</span>    memstoreSizing = new MemStoreSizing();<a name="line.211"></a>
-<span class="sourceLineNo">212</span>    for (int i = 0; i &lt; count; i++) {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>      memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memstoreSizing);<a name="line.213"></a>
-<span class="sourceLineNo">214</span>    }<a name="line.214"></a>
-<span class="sourceLineNo">215</span>    LOG.info("memstore2 estimated size="<a name="line.215"></a>
-<span class="sourceLineNo">216</span>        + (memstoreSizing.getDataSize() + memstoreSizing.getHeapSize()));<a name="line.216"></a>
-<span class="sourceLineNo">217</span>    final int seconds = 30;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");<a name="line.218"></a>
-<span class="sourceLineNo">219</span>    LOG.info("Exiting.");<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  }<a name="line.220"></a>
-<span class="sourceLineNo">221</span>}<a name="line.221"></a>
+<span class="sourceLineNo">128</span>    addToScanners(active, readPt, list);<a name="line.128"></a>
+<span class="sourceLineNo">129</span>    addToScanners(snapshot.getAllSegments(), readPt, list);<a name="line.129"></a>
+<span class="sourceLineNo">130</span>    return list;<a name="line.130"></a>
+<span class="sourceLineNo">131</span>  }<a name="line.131"></a>
+<span class="sourceLineNo">132</span><a name="line.132"></a>
+<span class="sourceLineNo">133</span>  @Override<a name="line.133"></a>
+<span class="sourceLineNo">134</span>  protected List&lt;Segment&gt; getSegments() throws IOException {<a name="line.134"></a>
+<span class="sourceLineNo">135</span>    List&lt;Segment&gt; list = new ArrayList&lt;&gt;(2);<a name="line.135"></a>
+<span class="sourceLineNo">136</span>    list.add(this.active);<a name="line.136"></a>
+<span class="sourceLineNo">137</span>    list.add(this.snapshot);<a name="line.137"></a>
+<span class="sourceLineNo">138</span>    return list;<a name="line.138"></a>
+<span class="sourceLineNo">139</span>  }<a name="line.139"></a>
+<span class="sourceLineNo">140</span><a name="line.140"></a>
+<span class="sourceLineNo">141</span>  /**<a name="line.141"></a>
+<span class="sourceLineNo">142</span>   * @param cell Find the row that comes after this one.  If null, we return the<a name="line.142"></a>
+<span class="sourceLineNo">143</span>   * first.<a name="line.143"></a>
+<span class="sourceLineNo">144</span>   * @return Next row or null if none found.<a name="line.144"></a>
+<span class="sourceLineNo">145</span>   */<a name="line.145"></a>
+<span class="sourceLineNo">146</span>  Cell getNextRow(final Cell cell) {<a name="line.146"></a>
+<span class="sourceLineNo">147</span>    return getLowest(<a name="line.147"></a>
+<span class="sourceLineNo">148</span>        getNextRow(cell, this.active.getCellSet()),<a name="line.148"></a>
+<span class="sourceLineNo">149</span>        getNextRow(cell, this.snapshot.getCellSet()));<a name="line.149"></a>
+<span class="sourceLineNo">150</span>  }<a name="line.150"></a>
+<span class="sourceLineNo">151</span><a name="line.151"></a>
+<span class="sourceLineNo">152</span>  @Override public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) {<a name="line.152"></a>
+<span class="sourceLineNo">153</span>  }<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>  @Override<a name="line.155"></a>
+<span class="sourceLineNo">156</span>  public MemStoreSize size() {<a name="line.156"></a>
+<span class="sourceLineNo">157</span>    return new MemStoreSize(active.getMemStoreSize());<a name="line.157"></a>
+<span class="sourceLineNo">158</span>  }<a name="line.158"></a>
+<span class="sourceLineNo">159</span><a name="line.159"></a>
+<span class="sourceLineNo">160</span>  /**<a name="line.160"></a>
+<span class="sourceLineNo">161</span>   * Check whether anything need to be done based on the current active set size<a name="line.161"></a>
+<span class="sourceLineNo">162</span>   * Nothing need to be done for the DefaultMemStore<a name="line.162"></a>
+<span class="sourceLineNo">163</span>   */<a name="line.163"></a>
+<span class="sourceLineNo">164</span>  @Override<a name="line.164"></a>
+<span class="sourceLineNo">165</span>  protected void checkActiveSize() {<a name="line.165"></a>
+<span class="sourceLineNo">166</span>    return;<a name="line.166"></a>
+<span class="sourceLineNo">167</span>  }<a name="line.167"></a>
+<span class="sourceLineNo">168</span><a name="line.168"></a>
+<span class="sourceLineNo">169</span>  @Override<a name="line.169"></a>
+<span class="sourceLineNo">170</span>  public long preFlushSeqIDEstimation() {<a name="line.170"></a>
+<span class="sourceLineNo">171</span>    return HConstants.NO_SEQNUM;<a name="line.171"></a>
+<span class="sourceLineNo">172</span>  }<a name="line.172"></a>
+<span class="sourceLineNo">173</span><a name="line.173"></a>
+<span class="sourceLineNo">174</span>  @Override public boolean isSloppy() {<a name="line.174"></a>
+<span class="sourceLineNo">175</span>    return false;<a name="line.175"></a>
+<span class="sourceLineNo">176</span>  }<a name="line.176"></a>
+<span class="sourceLineNo">177</span><a name="line.177"></a>
+<span class="sourceLineNo">178</span>  /**<a name="line.178"></a>
+<span class="sourceLineNo">179</span>   * Code to help figure if our approximation of object heap sizes is close<a name="line.179"></a>
+<span class="sourceLineNo">180</span>   * enough.  See hbase-900.  Fills memstores then waits so user can heap<a name="line.180"></a>
+<span class="sourceLineNo">181</span>   * dump and bring up resultant hprof in something like jprofiler which<a name="line.181"></a>
+<span class="sourceLineNo">182</span>   * allows you get 'deep size' on objects.<a name="line.182"></a>
+<span class="sourceLineNo">183</span>   * @param args main args<a name="line.183"></a>
+<span class="sourceLineNo">184</span>   */<a name="line.184"></a>
+<span class="sourceLineNo">185</span>  public static void main(String [] args) {<a name="line.185"></a>
+<span class="sourceLineNo">186</span>    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();<a name="line.186"></a>
+<span class="sourceLineNo">187</span>    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +<a name="line.187"></a>
+<span class="sourceLineNo">188</span>      runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());<a name="line.188"></a>
+<span class="sourceLineNo">189</span>    LOG.info("vmInputArguments=" + runtime.getInputArguments());<a name="line.189"></a>
+<span class="sourceLineNo">190</span>    DefaultMemStore memstore1 = new DefaultMemStore();<a name="line.190"></a>
+<span class="sourceLineNo">191</span>    // TODO: x32 vs x64<a name="line.191"></a>
+<span class="sourceLineNo">192</span>    final int count = 10000;<a name="line.192"></a>
+<span class="sourceLineNo">193</span>    byte [] fam = Bytes.toBytes("col");<a name="line.193"></a>
+<span class="sourceLineNo">194</span>    byte [] qf = Bytes.toBytes("umn");<a name="line.194"></a>
+<span class="sourceLineNo">195</span>    byte [] empty = new byte[0];<a name="line.195"></a>
+<span class="sourceLineNo">196</span>    MemStoreSizing memstoreSizing = new MemStoreSizing();<a name="line.196"></a>
+<span class="sourceLineNo">197</span>    for (int i = 0; i &lt; count; i++) {<a name="line.197"></a>
+<span class="sourceLineNo">198</span>      // Give each its own ts<a name="line.198"></a>
+<span class="sourceLineNo">199</span>      memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSizing);<a name="line.199"></a>
+<span class="sourceLineNo">200</span>    }<a name="line.200"></a>
+<span class="sourceLineNo">201</span>    LOG.info("memstore1 estimated size="<a name="line.201"></a>
+<span class="sourceLineNo">202</span>        + (memstoreSizing.getDataSize() + memstoreSizing.getHeapSize()));<a name="line.202"></a>
+<span class="sourceLineNo">203</span>    for (int i = 0; i &lt; count; i++) {<a name="line.203"></a>
+<span class="sourceLineNo">204</span>      memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memstoreSizing);<a name="line.204"></a>
+<span class="sourceLineNo">205</span>    }<a name="line.205"></a>
+<span class="sourceLineNo">206</span>    LOG.info("memstore1 estimated size (2nd loading of same data)="<a name="line.206"></a>
+<span class="sourceLineNo">207</span>        + (memstoreSizing.getDataSize() + memstoreSizing.getHeapSize()));<a name="line.207"></a>
+<span class="sourceLineNo">208</span>    // Make a variably sized memstore.<a name="line.208"></a>
+<span class="sourceLineNo">209</span>    DefaultMemStore memstore2 = new DefaultMemStore();<a name="line.209"></a>
+<span class="sourceLineNo">210</span>    memstoreSizing = new MemStoreSizing();<a name="line.210"></a>
+<span class="sourceLineNo">211</span>    for (int i = 0; i &lt; count; i++) {<a name="line.211"></a>
+<span class="sourceLineNo">212</span>      memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memstoreSizing);<a name="line.212"></a>
+<span class="sourceLineNo">213</span>    }<a name="line.213"></a>
+<span class="sourceLineNo">214</span>    LOG.info("memstore2 estimated size="<a name="line.214"></a>
+<span class="sourceLineNo">215</span>        + (memstoreSizing.getDataSize() + memstoreSizing.getHeapSize()));<a name="line.215"></a>
+<span class="sourceLineNo">216</span>    final int seconds = 30;<a name="line.216"></a>
+<span class="sourceLineNo">217</span>    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");<a name="line.217"></a>
+<span class="sourceLineNo">218</span>    LOG.info("Exiting.");<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  }<a name="line.219"></a>
+<span class="sourceLineNo">220</span>}<a name="line.220"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
index 05ee7d2..0cae7c6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
@@ -423,41 +423,34 @@
 <span class="sourceLineNo">415</span>    return this.heap;<a name="line.415"></a>
 <span class="sourceLineNo">416</span>  }<a name="line.416"></a>
 <span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  /**<a name="line.418"></a>
-<span class="sourceLineNo">419</span>   * @see KeyValueScanner#getScannerOrder()<a name="line.419"></a>
-<span class="sourceLineNo">420</span>   */<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  @Override<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  public long getScannerOrder() {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>    return 0;<a name="line.423"></a>
-<span class="sourceLineNo">424</span>  }<a name="line.424"></a>
-<span class="sourceLineNo">425</span><a name="line.425"></a>
-<span class="sourceLineNo">426</span>  @VisibleForTesting<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  KeyValueScanner getCurrentForTesting() {<a name="line.427"></a>
-<span class="sourceLineNo">428</span>    return current;<a name="line.428"></a>
-<span class="sourceLineNo">429</span>  }<a name="line.429"></a>
-<span class="sourceLineNo">430</span><a name="line.430"></a>
-<span class="sourceLineNo">431</span>  @Override<a name="line.431"></a>
-<span class="sourceLineNo">432</span>  public Cell getNextIndexedKey() {<a name="line.432"></a>
-<span class="sourceLineNo">433</span>    // here we return the next index key from the top scanner<a name="line.433"></a>
-<span class="sourceLineNo">434</span>    return current == null ? null : current.getNextIndexedKey();<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  }<a name="line.435"></a>
-<span class="sourceLineNo">436</span><a name="line.436"></a>
-<span class="sourceLineNo">437</span>  @Override<a name="line.437"></a>
-<span class="sourceLineNo">438</span>  public void shipped() throws IOException {<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    for (KeyValueScanner scanner : this.scannersForDelayedClose) {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      scanner.close(); // There wont be further fetch of Cells from these scanners. Just close.<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    }<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    this.scannersForDelayedClose.clear();<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    if (this.current != null) {<a name="line.443"></a>
-<span class="sourceLineNo">444</span>      this.current.shipped();<a name="line.444"></a>
-<span class="sourceLineNo">445</span>    }<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    if (this.heap != null) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      for (KeyValueScanner scanner : this.heap) {<a name="line.447"></a>
-<span class="sourceLineNo">448</span>        scanner.shipped();<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      }<a name="line.449"></a>
-<span class="sourceLineNo">450</span>    }<a name="line.450"></a>
-<span class="sourceLineNo">451</span>  }<a name="line.451"></a>
-<span class="sourceLineNo">452</span>}<a name="line.452"></a>
+<span class="sourceLineNo">418</span><a name="line.418"></a>
+<span class="sourceLineNo">419</span>  @VisibleForTesting<a name="line.419"></a>
+<span class="sourceLineNo">420</span>  KeyValueScanner getCurrentForTesting() {<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    return current;<a name="line.421"></a>
+<span class="sourceLineNo">422</span>  }<a name="line.422"></a>
+<span class="sourceLineNo">423</span><a name="line.423"></a>
+<span class="sourceLineNo">424</span>  @Override<a name="line.424"></a>
+<span class="sourceLineNo">425</span>  public Cell getNextIndexedKey() {<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    // here we return the next index key from the top scanner<a name="line.426"></a>
+<span class="sourceLineNo">427</span>    return current == null ? null : current.getNextIndexedKey();<a name="line.427"></a>
+<span class="sourceLineNo">428</span>  }<a name="line.428"></a>
+<span class="sourceLineNo">429</span><a name="line.429"></a>
+<span class="sourceLineNo">430</span>  @Override<a name="line.430"></a>
+<span class="sourceLineNo">431</span>  public void shipped() throws IOException {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>    for (KeyValueScanner scanner : this.scannersForDelayedClose) {<a name="line.432"></a>
+<span class="sourceLineNo">433</span>      scanner.close(); // There wont be further fetch of Cells from these scanners. Just close.<a name="line.433"></a>
+<span class="sourceLineNo">434</span>    }<a name="line.434"></a>
+<span class="sourceLineNo">435</span>    this.scannersForDelayedClose.clear();<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    if (this.current != null) {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      this.current.shipped();<a name="line.437"></a>
+<span class="sourceLineNo">438</span>    }<a name="line.438"></a>
+<span class="sourceLineNo">439</span>    if (this.heap != null) {<a name="line.439"></a>
+<span class="sourceLineNo">440</span>      for (KeyValueScanner scanner : this.heap) {<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        scanner.shipped();<a name="line.441"></a>
+<span class="sourceLineNo">442</span>      }<a name="line.442"></a>
+<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
+<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
+<span class="sourceLineNo">445</span>}<a name="line.445"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index 05ee7d2..0cae7c6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
@@ -423,41 +423,34 @@
 <span class="sourceLineNo">415</span>    return this.heap;<a name="line.415"></a>
 <span class="sourceLineNo">416</span>  }<a name="line.416"></a>
 <span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  /**<a name="line.418"></a>
-<span class="sourceLineNo">419</span>   * @see KeyValueScanner#getScannerOrder()<a name="line.419"></a>
-<span class="sourceLineNo">420</span>   */<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  @Override<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  public long getScannerOrder() {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>    return 0;<a name="line.423"></a>
-<span class="sourceLineNo">424</span>  }<a name="line.424"></a>
-<span class="sourceLineNo">425</span><a name="line.425"></a>
-<span class="sourceLineNo">426</span>  @VisibleForTesting<a name="line.426"></a>
-<span class="sourceLineNo">427</span>  KeyValueScanner getCurrentForTesting() {<a name="line.427"></a>
-<span class="sourceLineNo">428</span>    return current;<a name="line.428"></a>
-<span class="sourceLineNo">429</span>  }<a name="line.429"></a>
-<span class="sourceLineNo">430</span><a name="line.430"></a>
-<span class="sourceLineNo">431</span>  @Override<a name="line.431"></a>
-<span class="sourceLineNo">432</span>  public Cell getNextIndexedKey() {<a name="line.432"></a>
-<span class="sourceLineNo">433</span>    // here we return the next index key from the top scanner<a name="line.433"></a>
-<span class="sourceLineNo">434</span>    return current == null ? null : current.getNextIndexedKey();<a name="line.434"></a>
-<span class="sourceLineNo">435</span>  }<a name="line.435"></a>
-<span class="sourceLineNo">436</span><a name="line.436"></a>
-<span class="sourceLineNo">437</span>  @Override<a name="line.437"></a>
-<span class="sourceLineNo">438</span>  public void shipped() throws IOException {<a name="line.438"></a>
-<span class="sourceLineNo">439</span>    for (KeyValueScanner scanner : this.scannersForDelayedClose) {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      scanner.close(); // There wont be further fetch of Cells from these scanners. Just close.<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    }<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    this.scannersForDelayedClose.clear();<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    if (this.current != null) {<a name="line.443"></a>
-<span class="sourceLineNo">444</span>      this.current.shipped();<a name="line.444"></a>
-<span class="sourceLineNo">445</span>    }<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    if (this.heap != null) {<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      for (KeyValueScanner scanner : this.heap) {<a name="line.447"></a>
-<span class="sourceLineNo">448</span>        scanner.shipped();<a name="line.448"></a>
-<span class="sourceLineNo">449</span>      }<a name="line.449"></a>
-<span class="sourceLineNo">450</span>    }<a name="line.450"></a>
-<span class="sourceLineNo">451</span>  }<a name="line.451"></a>
-<span class="sourceLineNo">452</span>}<a name="line.452"></a>
+<span class="sourceLineNo">418</span><a name="line.418"></a>
+<span class="sourceLineNo">419</span>  @VisibleForTesting<a name="line.419"></a>
+<span class="sourceLineNo">420</span>  KeyValueScanner getCurrentForTesting() {<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    return current;<a name="line.421"></a>
+<span class="sourceLineNo">422</span>  }<a name="line.422"></a>
+<span class="sourceLineNo">423</span><a name="line.423"></a>
+<span class="sourceLineNo">424</span>  @Override<a name="line.424"></a>
+<span class="sourceLineNo">425</span>  public Cell getNextIndexedKey() {<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    // here we return the next index key from the top scanner<a name="line.426"></a>
+<span class="sourceLineNo">427</span>    return current == null ? null : current.getNextIndexedKey();<a name="line.427"></a>
+<span class="sourceLineNo">428</span>  }<a name="line.428"></a>
+<span class="sourceLineNo">429</span><a name="line.429"></a>
+<span class="sourceLineNo">430</span>  @Override<a name="line.430"></a>
+<span class="sourceLineNo">431</span>  public void shipped() throws IOException {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>    for (KeyValueScanner scanner : this.scannersForDelayedClose) {<a name="line.432"></a>
+<span class="sourceLineNo">433</span>      scanner.close(); // There wont be further fetch of Cells from these scanners. Just close.<a name="line.433"></a>
+<span class="sourceLineNo">434</span>    }<a name="line.434"></a>
+<span class="sourceLineNo">435</span>    this.scannersForDelayedClose.clear();<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    if (this.current != null) {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      this.current.shipped();<a name="line.437"></a>
+<span class="sourceLineNo">438</span>    }<a name="line.438"></a>
+<span class="sourceLineNo">439</span>    if (this.heap != null) {<a name="line.439"></a>
+<span class="sourceLineNo">440</span>      for (KeyValueScanner scanner : this.heap) {<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        scanner.shipped();<a name="line.441"></a>
+<span class="sourceLineNo">442</span>      }<a name="line.442"></a>
+<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
+<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
+<span class="sourceLineNo">445</span>}<a name="line.445"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
index bd13ffa..5c22c49 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
@@ -81,13 +81,13 @@
 <span class="sourceLineNo">073</span>  boolean reseek(Cell key) throws IOException;<a name="line.73"></a>
 <span class="sourceLineNo">074</span><a name="line.74"></a>
 <span class="sourceLineNo">075</span>  /**<a name="line.75"></a>
-<span class="sourceLineNo">076</span>   * Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners and<a name="line.76"></a>
-<span class="sourceLineNo">077</span>   * MemStoreScanners (other scanners simply return 0). This is required for comparing multiple<a name="line.77"></a>
-<span class="sourceLineNo">078</span>   * files to find out which one has the latest data. StoreFileScanners are ordered from 0<a name="line.78"></a>
-<span class="sourceLineNo">079</span>   * (oldest) to newest in increasing order. MemStoreScanner gets LONG.max since it always<a name="line.79"></a>
-<span class="sourceLineNo">080</span>   * contains freshest data.<a name="line.80"></a>
-<span class="sourceLineNo">081</span>   */<a name="line.81"></a>
-<span class="sourceLineNo">082</span>  long getScannerOrder();<a name="line.82"></a>
+<span class="sourceLineNo">076</span>   * Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners.<a name="line.76"></a>
+<span class="sourceLineNo">077</span>   * This is required for comparing multiple files to find out which one has the latest<a name="line.77"></a>
+<span class="sourceLineNo">078</span>   * data. StoreFileScanners are ordered from 0 (oldest) to newest in increasing order.<a name="line.78"></a>
+<span class="sourceLineNo">079</span>   */<a name="line.79"></a>
+<span class="sourceLineNo">080</span>  default long getScannerOrder(){<a name="line.80"></a>
+<span class="sourceLineNo">081</span>    return 0;<a name="line.81"></a>
+<span class="sourceLineNo">082</span>  }<a name="line.82"></a>
 <span class="sourceLineNo">083</span><a name="line.83"></a>
 <span class="sourceLineNo">084</span>  /**<a name="line.84"></a>
 <span class="sourceLineNo">085</span>   * Close the KeyValue scanner.<a name="line.85"></a>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
index e7e7ff6..f65f30f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactorSegmentsIterator.html
@@ -64,114 +64,111 @@
 <span class="sourceLineNo">056</span>    super(compactionKVMax);<a name="line.56"></a>
 <span class="sourceLineNo">057</span><a name="line.57"></a>
 <span class="sourceLineNo">058</span>    List&lt;KeyValueScanner&gt; scanners = new ArrayList&lt;KeyValueScanner&gt;();<a name="line.58"></a>
-<span class="sourceLineNo">059</span>    // create the list of scanners to traverse over all the data<a name="line.59"></a>
-<span class="sourceLineNo">060</span>    // no dirty reads here as these are immutable segments<a name="line.60"></a>
-<span class="sourceLineNo">061</span>    int order = segments.size();<a name="line.61"></a>
-<span class="sourceLineNo">062</span>    AbstractMemStore.addToScanners(segments, Integer.MAX_VALUE, order, scanners);<a name="line.62"></a>
-<span class="sourceLineNo">063</span>    // build the scanner based on Query Matcher<a name="line.63"></a>
-<span class="sourceLineNo">064</span>    // reinitialize the compacting scanner for each instance of iterator<a name="line.64"></a>
-<span class="sourceLineNo">065</span>    compactingScanner = createScanner(store, scanners);<a name="line.65"></a>
-<span class="sourceLineNo">066</span>    refillKVS();<a name="line.66"></a>
-<span class="sourceLineNo">067</span>  }<a name="line.67"></a>
-<span class="sourceLineNo">068</span><a name="line.68"></a>
-<span class="sourceLineNo">069</span>  @Override<a name="line.69"></a>
-<span class="sourceLineNo">070</span>  public boolean hasNext() {<a name="line.70"></a>
-<span class="sourceLineNo">071</span>    if (kvsIterator == null) { // for the case when the result is empty<a name="line.71"></a>
-<span class="sourceLineNo">072</span>      return false;<a name="line.72"></a>
-<span class="sourceLineNo">073</span>    }<a name="line.73"></a>
-<span class="sourceLineNo">074</span>    // return true either we have cells in buffer or we can get more.<a name="line.74"></a>
-<span class="sourceLineNo">075</span>    return kvsIterator.hasNext() || refillKVS();<a name="line.75"></a>
-<span class="sourceLineNo">076</span>  }<a name="line.76"></a>
-<span class="sourceLineNo">077</span><a name="line.77"></a>
-<span class="sourceLineNo">078</span>  @Override<a name="line.78"></a>
-<span class="sourceLineNo">079</span>  public Cell next() {<a name="line.79"></a>
-<span class="sourceLineNo">080</span>    if (!hasNext()) {<a name="line.80"></a>
-<span class="sourceLineNo">081</span>      throw new NoSuchElementException();<a name="line.81"></a>
-<span class="sourceLineNo">082</span>    }<a name="line.82"></a>
-<span class="sourceLineNo">083</span>    return kvsIterator.next();<a name="line.83"></a>
-<span class="sourceLineNo">084</span>  }<a name="line.84"></a>
-<span class="sourceLineNo">085</span><a name="line.85"></a>
-<span class="sourceLineNo">086</span>  @Override<a name="line.86"></a>
-<span class="sourceLineNo">087</span>  public void close() {<a name="line.87"></a>
-<span class="sourceLineNo">088</span>    try {<a name="line.88"></a>
-<span class="sourceLineNo">089</span>      compactingScanner.close();<a name="line.89"></a>
-<span class="sourceLineNo">090</span>    } catch (IOException e) {<a name="line.90"></a>
-<span class="sourceLineNo">091</span>      LOG.warn("close store scanner failed", e);<a name="line.91"></a>
-<span class="sourceLineNo">092</span>    }<a name="line.92"></a>
-<span class="sourceLineNo">093</span>    compactingScanner = null;<a name="line.93"></a>
-<span class="sourceLineNo">094</span>    kvs.clear();<a name="line.94"></a>
-<span class="sourceLineNo">095</span>  }<a name="line.95"></a>
-<span class="sourceLineNo">096</span><a name="line.96"></a>
-<span class="sourceLineNo">097</span>  @Override<a name="line.97"></a>
-<span class="sourceLineNo">098</span>  public void remove() {<a name="line.98"></a>
-<span class="sourceLineNo">099</span>    throw new UnsupportedOperationException();<a name="line.99"></a>
-<span class="sourceLineNo">100</span>  }<a name="line.100"></a>
-<span class="sourceLineNo">101</span><a name="line.101"></a>
-<span class="sourceLineNo">102</span>  /**<a name="line.102"></a>
-<span class="sourceLineNo">103</span>   * Creates the scanner for compacting the pipeline.<a name="line.103"></a>
-<span class="sourceLineNo">104</span>   * @return the scanner<a name="line.104"></a>
-<span class="sourceLineNo">105</span>   */<a name="line.105"></a>
-<span class="sourceLineNo">106</span>  private InternalScanner createScanner(HStore store, List&lt;KeyValueScanner&gt; scanners)<a name="line.106"></a>
-<span class="sourceLineNo">107</span>      throws IOException {<a name="line.107"></a>
-<span class="sourceLineNo">108</span>    InternalScanner scanner = null;<a name="line.108"></a>
-<span class="sourceLineNo">109</span>    boolean success = false;<a name="line.109"></a>
-<span class="sourceLineNo">110</span>    try {<a name="line.110"></a>
-<span class="sourceLineNo">111</span>      RegionCoprocessorHost cpHost = store.getCoprocessorHost();<a name="line.111"></a>
-<span class="sourceLineNo">112</span>      ScanInfo scanInfo;<a name="line.112"></a>
-<span class="sourceLineNo">113</span>      if (cpHost != null) {<a name="line.113"></a>
-<span class="sourceLineNo">114</span>        scanInfo = cpHost.preMemStoreCompactionCompactScannerOpen(store);<a name="line.114"></a>
-<span class="sourceLineNo">115</span>      } else {<a name="line.115"></a>
-<span class="sourceLineNo">116</span>        scanInfo = store.getScanInfo();<a name="line.116"></a>
-<span class="sourceLineNo">117</span>      }<a name="line.117"></a>
-<span class="sourceLineNo">118</span>      scanner = new StoreScanner(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES,<a name="line.118"></a>
-<span class="sourceLineNo">119</span>          store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);<a name="line.119"></a>
-<span class="sourceLineNo">120</span>      if (cpHost != null) {<a name="line.120"></a>
-<span class="sourceLineNo">121</span>        InternalScanner scannerFromCp = cpHost.preMemStoreCompactionCompact(store, scanner);<a name="line.121"></a>
-<span class="sourceLineNo">122</span>        if (scannerFromCp == null) {<a name="line.122"></a>
-<span class="sourceLineNo">123</span>          throw new CoprocessorException("Got a null InternalScanner when calling" +<a name="line.123"></a>
-<span class="sourceLineNo">124</span>              " preMemStoreCompactionCompact which is not acceptable");<a name="line.124"></a>
-<span class="sourceLineNo">125</span>        }<a name="line.125"></a>
+<span class="sourceLineNo">059</span>    AbstractMemStore.addToScanners(segments, Integer.MAX_VALUE, scanners);<a name="line.59"></a>
+<span class="sourceLineNo">060</span>    // build the scanner based on Query Matcher<a name="line.60"></a>
+<span class="sourceLineNo">061</span>    // reinitialize the compacting scanner for each instance of iterator<a name="line.61"></a>
+<span class="sourceLineNo">062</span>    compactingScanner = createScanner(store, scanners);<a name="line.62"></a>
+<span class="sourceLineNo">063</span>    refillKVS();<a name="line.63"></a>
+<span class="sourceLineNo">064</span>  }<a name="line.64"></a>
+<span class="sourceLineNo">065</span><a name="line.65"></a>
+<span class="sourceLineNo">066</span>  @Override<a name="line.66"></a>
+<span class="sourceLineNo">067</span>  public boolean hasNext() {<a name="line.67"></a>
+<span class="sourceLineNo">068</span>    if (kvsIterator == null) { // for the case when the result is empty<a name="line.68"></a>
+<span class="sourceLineNo">069</span>      return false;<a name="line.69"></a>
+<span class="sourceLineNo">070</span>    }<a name="line.70"></a>
+<span class="sourceLineNo">071</span>    // return true either we have cells in buffer or we can get more.<a name="line.71"></a>
+<span class="sourceLineNo">072</span>    return kvsIterator.hasNext() || refillKVS();<a name="line.72"></a>
+<span class="sourceLineNo">073</span>  }<a name="line.73"></a>
+<span class="sourceLineNo">074</span><a name="line.74"></a>
+<span class="sourceLineNo">075</span>  @Override<a name="line.75"></a>
+<span class="sourceLineNo">076</span>  public Cell next() {<a name="line.76"></a>
+<span class="sourceLineNo">077</span>    if (!hasNext()) {<a name="line.77"></a>
+<span class="sourceLineNo">078</span>      throw new NoSuchElementException();<a name="line.78"></a>
+<span class="sourceLineNo">079</span>    }<a name="line.79"></a>
+<span class="sourceLineNo">080</span>    return kvsIterator.next();<a name="line.80"></a>
+<span class="sourceLineNo">081</span>  }<a name="line.81"></a>
+<span class="sourceLineNo">082</span><a name="line.82"></a>
+<span class="sourceLineNo">083</span>  @Override<a name="line.83"></a>
+<span class="sourceLineNo">084</span>  public void close() {<a name="line.84"></a>
+<span class="sourceLineNo">085</span>    try {<a name="line.85"></a>
+<span class="sourceLineNo">086</span>      compactingScanner.close();<a name="line.86"></a>
+<span class="sourceLineNo">087</span>    } catch (IOException e) {<a name="line.87"></a>
+<span class="sourceLineNo">088</span>      LOG.warn("close store scanner failed", e);<a name="line.88"></a>
+<span class="sourceLineNo">089</span>    }<a name="line.89"></a>
+<span class="sourceLineNo">090</span>    compactingScanner = null;<a name="line.90"></a>
+<span class="sourceLineNo">091</span>    kvs.clear();<a name="line.91"></a>
+<span class="sourceLineNo">092</span>  }<a name="line.92"></a>
+<span class="sourceLineNo">093</span><a name="line.93"></a>
+<span class="sourceLineNo">094</span>  @Override<a name="line.94"></a>
+<span class="sourceLineNo">095</span>  public void remove() {<a name="line.95"></a>
+<span class="sourceLineNo">096</span>    throw new UnsupportedOperationException();<a name="line.96"></a>
+<span class="sourceLineNo">097</span>  }<a name="line.97"></a>
+<span class="sourceLineNo">098</span><a name="line.98"></a>
+<span class="sourceLineNo">099</span>  /**<a name="line.99"></a>
+<span class="sourceLineNo">100</span>   * Creates the scanner for compacting the pipeline.<a name="line.100"></a>
+<span class="sourceLineNo">101</span>   * @return the scanner<a name="line.101"></a>
+<span class="sourceLineNo">102</span>   */<a name="line.102"></a>
+<span class="sourceLineNo">103</span>  private InternalScanner createScanner(HStore store, List&lt;KeyValueScanner&gt; scanners)<a name="line.103"></a>
+<span class="sourceLineNo">104</span>      throws IOException {<a name="line.104"></a>
+<span class="sourceLineNo">105</span>    InternalScanner scanner = null;<a name="line.105"></a>
+<span class="sourceLineNo">106</span>    boolean success = false;<a name="line.106"></a>
+<span class="sourceLineNo">107</span>    try {<a name="line.107"></a>
+<span class="sourceLineNo">108</span>      RegionCoprocessorHost cpHost = store.getCoprocessorHost();<a name="line.108"></a>
+<span class="sourceLineNo">109</span>      ScanInfo scanInfo;<a name="line.109"></a>
+<span class="sourceLineNo">110</span>      if (cpHost != null) {<a name="line.110"></a>
+<span class="sourceLineNo">111</span>        scanInfo = cpHost.preMemStoreCompactionCompactScannerOpen(store);<a name="line.111"></a>
+<span class="sourceLineNo">112</span>      } else {<a name="line.112"></a>
+<span class="sourceLineNo">113</span>        scanInfo = store.getScanInfo();<a name="line.113"></a>
+<span class="sourceLineNo">114</span>      }<a name="line.114"></a>
+<span class="sourceLineNo">115</span>      scanner = new StoreScanner(store, scanInfo, scanners, ScanType.COMPACT_RETAIN_DELETES,<a name="line.115"></a>
+<span class="sourceLineNo">116</span>          store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);<a name="line.116"></a>
+<span class="sourceLineNo">117</span>      if (cpHost != null) {<a name="line.117"></a>
+<span class="sourceLineNo">118</span>        InternalScanner scannerFromCp = cpHost.preMemStoreCompactionCompact(store, scanner);<a name="line.118"></a>
+<span class="sourceLineNo">119</span>        if (scannerFromCp == null) {<a name="line.119"></a>
+<span class="sourceLineNo">120</span>          throw new CoprocessorException("Got a null InternalScanner when calling" +<a name="line.120"></a>
+<span class="sourceLineNo">121</span>              " preMemStoreCompactionCompact which is not acceptable");<a name="line.121"></a>
+<span class="sourceLineNo">122</span>        }<a name="line.122"></a>
+<span class="sourceLineNo">123</span>        success = true;<a name="line.123"></a>
+<span class="sourceLineNo">124</span>        return scannerFromCp;<a name="line.124"></a>
+<span class="sourceLineNo">125</span>      } else {<a name="line.125"></a>
 <span class="sourceLineNo">126</span>        success = true;<a name="line.126"></a>
-<span class="sourceLineNo">127</span>        return scannerFromCp;<a name="line.127"></a>
-<span class="sourceLineNo">128</span>      } else {<a name="line.128"></a>
-<span class="sourceLineNo">129</span>        success = true;<a name="line.129"></a>
-<span class="sourceLineNo">130</span>        return scanner;<a name="line.130"></a>
-<span class="sourceLineNo">131</span>      }<a name="line.131"></a>
-<span class="sourceLineNo">132</span>    } finally {<a name="line.132"></a>
-<span class="sourceLineNo">133</span>      if (!success) {<a name="line.133"></a>
-<span class="sourceLineNo">134</span>        Closeables.close(scanner, true);<a name="line.134"></a>
-<span class="sourceLineNo">135</span>        scanners.forEach(KeyValueScanner::close);<a name="line.135"></a>
-<span class="sourceLineNo">136</span>      }<a name="line.136"></a>
-<span class="sourceLineNo">137</span>    }<a name="line.137"></a>
-<span class="sourceLineNo">138</span>  }<a name="line.138"></a>
-<span class="sourceLineNo">139</span><a name="line.139"></a>
-<span class="sourceLineNo">140</span>  /*<a name="line.140"></a>
-<span class="sourceLineNo">141</span>   * Refill kev-value set (should be invoked only when KVS is empty) Returns true if KVS is<a name="line.141"></a>
-<span class="sourceLineNo">142</span>   * non-empty<a name="line.142"></a>
-<span class="sourceLineNo">143</span>   */<a name="line.143"></a>
-<span class="sourceLineNo">144</span>  private boolean refillKVS() {<a name="line.144"></a>
-<span class="sourceLineNo">145</span>    // if there is nothing expected next in compactingScanner<a name="line.145"></a>
-<span class="sourceLineNo">146</span>    if (!hasMore) {<a name="line.146"></a>
-<span class="sourceLineNo">147</span>      return false;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>    }<a name="line.148"></a>
-<span class="sourceLineNo">149</span>    // clear previous KVS, first initiated in the constructor<a name="line.149"></a>
-<span class="sourceLineNo">150</span>    kvs.clear();<a name="line.150"></a>
-<span class="sourceLineNo">151</span>    for (;;) {<a name="line.151"></a>
-<span class="sourceLineNo">152</span>      try {<a name="line.152"></a>
-<span class="sourceLineNo">153</span>        hasMore = compactingScanner.next(kvs, scannerContext);<a name="line.153"></a>
-<span class="sourceLineNo">154</span>      } catch (IOException e) {<a name="line.154"></a>
-<span class="sourceLineNo">155</span>        // should not happen as all data are in memory<a name="line.155"></a>
-<span class="sourceLineNo">156</span>        throw new IllegalStateException(e);<a name="line.156"></a>
-<span class="sourceLineNo">157</span>      }<a name="line.157"></a>
-<span class="sourceLineNo">158</span>      if (!kvs.isEmpty()) {<a name="line.158"></a>
-<span class="sourceLineNo">159</span>        kvsIterator = kvs.iterator();<a name="line.159"></a>
-<span class="sourceLineNo">160</span>        return true;<a name="line.160"></a>
-<span class="sourceLineNo">161</span>      } else if (!hasMore) {<a name="line.161"></a>
-<span class="sourceLineNo">162</span>        return false;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>      }<a name="line.163"></a>
-<span class="sourceLineNo">164</span>    }<a name="line.164"></a>
-<span class="sourceLineNo">165</span>  }<a name="line.165"></a>
-<span class="sourceLineNo">166</span>}<a name="line.166"></a>
+<span class="sourceLineNo">127</span>        return scanner;<a name="line.127"></a>
+<span class="sourceLineNo">128</span>      }<a name="line.128"></a>
+<span class="sourceLineNo">129</span>    } finally {<a name="line.129"></a>
+<span class="sourceLineNo">130</span>      if (!success) {<a name="line.130"></a>
+<span class="sourceLineNo">131</span>        Closeables.close(scanner, true);<a name="line.131"></a>
+<span class="sourceLineNo">132</span>        scanners.forEach(KeyValueScanner::close);<a name="line.132"></a>
+<span class="sourceLineNo">133</span>      }<a name="line.133"></a>
+<span class="sourceLineNo">134</span>    }<a name="line.134"></a>
+<span class="sourceLineNo">135</span>  }<a name="line.135"></a>
+<span class="sourceLineNo">136</span><a name="line.136"></a>
+<span class="sourceLineNo">137</span>  /*<a name="line.137"></a>
+<span class="sourceLineNo">138</span>   * Refill kev-value set (should be invoked only when KVS is empty) Returns true if KVS is<a name="line.138"></a>
+<span class="sourceLineNo">139</span>   * non-empty<a name="line.139"></a>
+<span class="sourceLineNo">140</span>   */<a name="line.140"></a>
+<span class="sourceLineNo">141</span>  private boolean refillKVS() {<a name="line.141"></a>
+<span class="sourceLineNo">142</span>    // if there is nothing expected next in compactingScanner<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    if (!hasMore) {<a name="line.143"></a>
+<span class="sourceLineNo">144</span>      return false;<a name="line.144"></a>
+<span class="sourceLineNo">145</span>    }<a name="line.145"></a>
+<span class="sourceLineNo">146</span>    // clear previous KVS, first initiated in the constructor<a name="line.146"></a>
+<span class="sourceLineNo">147</span>    kvs.clear();<a name="line.147"></a>
+<span class="sourceLineNo">148</span>    for (;;) {<a name="line.148"></a>
+<span class="sourceLineNo">149</span>      try {<a name="line.149"></a>
+<span class="sourceLineNo">150</span>        hasMore = compactingScanner.next(kvs, scannerContext);<a name="line.150"></a>
+<span class="sourceLineNo">151</span>      } catch (IOException e) {<a name="line.151"></a>
+<span class="sourceLineNo">152</span>        // should not happen as all data are in memory<a name="line.152"></a>
+<span class="sourceLineNo">153</span>        throw new IllegalStateException(e);<a name="line.153"></a>
+<span class="sourceLineNo">154</span>      }<a name="line.154"></a>
+<span class="sourceLineNo">155</span>      if (!kvs.isEmpty()) {<a name="line.155"></a>
+<span class="sourceLineNo">156</span>        kvsIterator = kvs.iterator();<a name="line.156"></a>
+<span class="sourceLineNo">157</span>        return true;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>      } else if (!hasMore) {<a name="line.158"></a>
+<span class="sourceLineNo">159</span>        return false;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>      }<a name="line.160"></a>
+<span class="sourceLineNo">161</span>    }<a name="line.161"></a>
+<span class="sourceLineNo">162</span>  }<a name="line.162"></a>
+<span class="sourceLineNo">163</span>}<a name="line.163"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
index 2b4dbc8..ae5169e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.html
@@ -55,58 +55,57 @@
 <span class="sourceLineNo">047</span>    super(compactionKVMax);<a name="line.47"></a>
 <span class="sourceLineNo">048</span>    // create the list of scanners to traverse over all the data<a name="line.48"></a>
 <span class="sourceLineNo">049</span>    // no dirty reads here as these are immutable segments<a name="line.49"></a>
-<span class="sourceLineNo">050</span>    int order = segments.size();<a name="line.50"></a>
-<span class="sourceLineNo">051</span>    AbstractMemStore.addToScanners(segments, Integer.MAX_VALUE, order, scanners);<a name="line.51"></a>
-<span class="sourceLineNo">052</span>    heap = new KeyValueHeap(scanners, comparator);<a name="line.52"></a>
-<span class="sourceLineNo">053</span>  }<a name="line.53"></a>
-<span class="sourceLineNo">054</span><a name="line.54"></a>
-<span class="sourceLineNo">055</span>  @Override<a name="line.55"></a>
-<span class="sourceLineNo">056</span>  public boolean hasNext() {<a name="line.56"></a>
-<span class="sourceLineNo">057</span>    if (closed) {<a name="line.57"></a>
-<span class="sourceLineNo">058</span>      return false;<a name="line.58"></a>
-<span class="sourceLineNo">059</span>    }<a name="line.59"></a>
-<span class="sourceLineNo">060</span>    if (this.heap != null) {<a name="line.60"></a>
-<span class="sourceLineNo">061</span>      return (this.heap.peek() != null);<a name="line.61"></a>
-<span class="sourceLineNo">062</span>    }<a name="line.62"></a>
-<span class="sourceLineNo">063</span>    // Doing this way in case some test cases tries to peek directly<a name="line.63"></a>
-<span class="sourceLineNo">064</span>    return false;<a name="line.64"></a>
-<span class="sourceLineNo">065</span>  }<a name="line.65"></a>
-<span class="sourceLineNo">066</span><a name="line.66"></a>
-<span class="sourceLineNo">067</span>  @Override<a name="line.67"></a>
-<span class="sourceLineNo">068</span>  public Cell next()  {<a name="line.68"></a>
-<span class="sourceLineNo">069</span>    try {                 // try to get next<a name="line.69"></a>
-<span class="sourceLineNo">070</span>      if (!closed &amp;&amp; heap != null) {<a name="line.70"></a>
-<span class="sourceLineNo">071</span>        return heap.next();<a name="line.71"></a>
-<span class="sourceLineNo">072</span>      }<a name="line.72"></a>
-<span class="sourceLineNo">073</span>    } catch (IOException ie) {<a name="line.73"></a>
-<span class="sourceLineNo">074</span>      throw new IllegalStateException(ie);<a name="line.74"></a>
-<span class="sourceLineNo">075</span>    }<a name="line.75"></a>
-<span class="sourceLineNo">076</span>    return null;<a name="line.76"></a>
-<span class="sourceLineNo">077</span>  }<a name="line.77"></a>
-<span class="sourceLineNo">078</span><a name="line.78"></a>
-<span class="sourceLineNo">079</span>  @Override<a name="line.79"></a>
-<span class="sourceLineNo">080</span>  public void close() {<a name="line.80"></a>
-<span class="sourceLineNo">081</span>    if (closed) {<a name="line.81"></a>
-<span class="sourceLineNo">082</span>      return;<a name="line.82"></a>
-<span class="sourceLineNo">083</span>    }<a name="line.83"></a>
-<span class="sourceLineNo">084</span>    // Ensuring that all the segment scanners are closed<a name="line.84"></a>
-<span class="sourceLineNo">085</span>    if (heap != null) {<a name="line.85"></a>
-<span class="sourceLineNo">086</span>      heap.close();<a name="line.86"></a>
-<span class="sourceLineNo">087</span>      // It is safe to do close as no new calls will be made to this scanner.<a name="line.87"></a>
-<span class="sourceLineNo">088</span>      heap = null;<a name="line.88"></a>
-<span class="sourceLineNo">089</span>    } else {<a name="line.89"></a>
-<span class="sourceLineNo">090</span>      for (KeyValueScanner scanner : scanners) {<a name="line.90"></a>
-<span class="sourceLineNo">091</span>        scanner.close();<a name="line.91"></a>
-<span class="sourceLineNo">092</span>      }<a name="line.92"></a>
-<span class="sourceLineNo">093</span>    }<a name="line.93"></a>
-<span class="sourceLineNo">094</span>    closed = true;<a name="line.94"></a>
-<span class="sourceLineNo">095</span>  }<a name="line.95"></a>
-<span class="sourceLineNo">096</span><a name="line.96"></a>
-<span class="sourceLineNo">097</span>  @Override<a name="line.97"></a>
-<span class="sourceLineNo">098</span>  public void remove() {<a name="line.98"></a>
-<span class="sourceLineNo">099</span>    throw new UnsupportedOperationException();<a name="line.99"></a>
-<span class="sourceLineNo">100</span>  }<a name="line.100"></a>
-<span class="sourceLineNo">101</span>}<a name="line.101"></a>
+<span class="sourceLineNo">050</span>    AbstractMemStore.addToScanners(segments, Integer.MAX_VALUE, scanners);<a name="line.50"></a>
+<span class="sourceLineNo">051</span>    heap = new KeyValueHeap(scanners, comparator);<a name="line.51"></a>
+<span class="sourceLineNo">052</span>  }<a name="line.52"></a>
+<span class="sourceLineNo">053</span><a name="line.53"></a>
+<span class="sourceLineNo">054</span>  @Override<a name="line.54"></a>
+<span class="sourceLineNo">055</span>  public boolean hasNext() {<a name="line.55"></a>
+<span class="sourceLineNo">056</span>    if (closed) {<a name="line.56"></a>
+<span class="sourceLineNo">057</span>      return false;<a name="line.57"></a>
+<span class="sourceLineNo">058</span>    }<a name="line.58"></a>
+<span class="sourceLineNo">059</span>    if (this.heap != null) {<a name="line.59"></a>
+<span class="sourceLineNo">060</span>      return (this.heap.peek() != null);<a name="line.60"></a>
+<span class="sourceLineNo">061</span>    }<a name="line.61"></a>
+<span class="sourceLineNo">062</span>    // Doing this way in case some test cases tries to peek directly<a name="line.62"></a>
+<span class="sourceLineNo">063</span>    return false;<a name="line.63"></a>
+<span class="sourceLineNo">064</span>  }<a name="line.64"></a>
+<span class="sourceLineNo">065</span><a name="line.65"></a>
+<span class="sourceLineNo">066</span>  @Override<a name="line.66"></a>
+<span class="sourceLineNo">067</span>  public Cell next()  {<a name="line.67"></a>
+<span class="sourceLineNo">068</span>    try {                 // try to get next<a name="line.68"></a>
+<span class="sourceLineNo">069</span>      if (!closed &amp;&amp; heap != null) {<a name="line.69"></a>
+<span class="sourceLineNo">070</span>        return heap.next();<a name="line.70"></a>
+<span class="sourceLineNo">071</span>      }<a name="line.71"></a>
+<span class="sourceLineNo">072</span>    } catch (IOException ie) {<a name="line.72"></a>
+<span class="sourceLineNo">073</span>      throw new IllegalStateException(ie);<a name="line.73"></a>
+<span class="sourceLineNo">074</span>    }<a name="line.74"></a>
+<span class="sourceLineNo">075</span>    return null;<a name="line.75"></a>
+<span class="sourceLineNo">076</span>  }<a name="line.76"></a>
+<span class="sourceLineNo">077</span><a name="line.77"></a>
+<span class="sourceLineNo">078</span>  @Override<a name="line.78"></a>
+<span class="sourceLineNo">079</span>  public void close() {<a name="line.79"></a>
+<span class="sourceLineNo">080</span>    if (closed) {<a name="line.80"></a>
+<span class="sourceLineNo">081</span>      return;<a name="line.81"></a>
+<span class="sourceLineNo">082</span>    }<a name="line.82"></a>
+<span class="sourceLineNo">083</span>    // Ensuring that all the segment scanners are closed<a name="line.83"></a>
+<span class="sourceLineNo">084</span>    if (heap != null) {<a name="line.84"></a>
+<span class="sourceLineNo">085</span>      heap.close();<a name="line.85"></a>
+<span class="sourceLineNo">086</span>      // It is safe to do close as no new calls will be made to this scanner.<a name="line.86"></a>
+<span class="sourceLineNo">087</span>      heap = null;<a name="line.87"></a>
+<span class="sourceLineNo">088</span>    } else {<a name="line.88"></a>
+<span class="sourceLineNo">089</span>      for (KeyValueScanner scanner : scanners) {<a name="line.89"></a>
+<span class="sourceLineNo">090</span>        scanner.close();<a name="line.90"></a>
+<span class="sourceLineNo">091</span>      }<a name="line.91"></a>
+<span class="sourceLineNo">092</span>    }<a name="line.92"></a>
+<span class="sourceLineNo">093</span>    closed = true;<a name="line.93"></a>
+<span class="sourceLineNo">094</span>  }<a name="line.94"></a>
+<span class="sourceLineNo">095</span><a name="line.95"></a>
+<span class="sourceLineNo">096</span>  @Override<a name="line.96"></a>
+<span class="sourceLineNo">097</span>  public void remove() {<a name="line.97"></a>
+<span class="sourceLineNo">098</span>    throw new UnsupportedOperationException();<a name="line.98"></a>
+<span class="sourceLineNo">099</span>  }<a name="line.99"></a>
+<span class="sourceLineNo">100</span>}<a name="line.100"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ede30993/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html
index f3faae4..b8b1115 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.html
@@ -48,7 +48,7 @@
 <span class="sourceLineNo">040</span>    this.cellsCount = snapshot.getCellsCount();<a name="line.40"></a>
 <span class="sourceLineNo">041</span>    this.memStoreSize = snapshot.getMemStoreSize();<a name="line.41"></a>
 <span class="sourceLineNo">042</span>    this.timeRangeTracker = snapshot.getTimeRangeTracker();<a name="line.42"></a>
-<span class="sourceLineNo">043</span>    this.scanners = snapshot.getScanners(Long.MAX_VALUE, Long.MAX_VALUE);<a name="line.43"></a>
+<span class="sourceLineNo">043</span>    this.scanners = snapshot.getScanners(Long.MAX_VALUE);<a name="line.43"></a>
 <span class="sourceLineNo">044</span>    this.tagsPresent = snapshot.isTagsPresent();<a name="line.44"></a>
 <span class="sourceLineNo">045</span>  }<a name="line.45"></a>
 <span class="sourceLineNo">046</span><a name="line.46"></a>