Posted to commits@hbase.apache.org by gi...@apache.org on 2018/08/25 14:47:52 UTC

[01/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b8bc22fd3 -> c7b180e23


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMaster.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMaster.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMaster.html
index 1f9562b..387de47 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMaster.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMaster.html
@@ -27,214 +27,252 @@
 <span class="sourceLineNo">019</span><a name="line.19"></a>
 <span class="sourceLineNo">020</span>import static org.junit.Assert.assertArrayEquals;<a name="line.20"></a>
 <span class="sourceLineNo">021</span>import static org.junit.Assert.assertEquals;<a name="line.21"></a>
-<span class="sourceLineNo">022</span>import static org.junit.Assert.assertTrue;<a name="line.22"></a>
-<span class="sourceLineNo">023</span>import static org.junit.Assert.fail;<a name="line.23"></a>
-<span class="sourceLineNo">024</span><a name="line.24"></a>
-<span class="sourceLineNo">025</span>import java.io.IOException;<a name="line.25"></a>
-<span class="sourceLineNo">026</span>import java.util.List;<a name="line.26"></a>
-<span class="sourceLineNo">027</span>import java.util.Map;<a name="line.27"></a>
-<span class="sourceLineNo">028</span>import org.apache.hadoop.conf.Configuration;<a name="line.28"></a>
-<span class="sourceLineNo">029</span>import org.apache.hadoop.hbase.HBaseClassTestRule;<a name="line.29"></a>
-<span class="sourceLineNo">030</span>import org.apache.hadoop.hbase.HBaseTestingUtility;<a name="line.30"></a>
-<span class="sourceLineNo">031</span>import org.apache.hadoop.hbase.HColumnDescriptor;<a name="line.31"></a>
-<span class="sourceLineNo">032</span>import org.apache.hadoop.hbase.HConstants;<a name="line.32"></a>
-<span class="sourceLineNo">033</span>import org.apache.hadoop.hbase.HTableDescriptor;<a name="line.33"></a>
-<span class="sourceLineNo">034</span>import org.apache.hadoop.hbase.MetaTableAccessor;<a name="line.34"></a>
-<span class="sourceLineNo">035</span>import org.apache.hadoop.hbase.MiniHBaseCluster;<a name="line.35"></a>
-<span class="sourceLineNo">036</span>import org.apache.hadoop.hbase.PleaseHoldException;<a name="line.36"></a>
-<span class="sourceLineNo">037</span>import org.apache.hadoop.hbase.ServerName;<a name="line.37"></a>
-<span class="sourceLineNo">038</span>import org.apache.hadoop.hbase.TableName;<a name="line.38"></a>
-<span class="sourceLineNo">039</span>import org.apache.hadoop.hbase.UnknownRegionException;<a name="line.39"></a>
-<span class="sourceLineNo">040</span>import org.apache.hadoop.hbase.client.Admin;<a name="line.40"></a>
-<span class="sourceLineNo">041</span>import org.apache.hadoop.hbase.client.RegionInfo;<a name="line.41"></a>
-<span class="sourceLineNo">042</span>import org.apache.hadoop.hbase.client.RegionInfoBuilder;<a name="line.42"></a>
-<span class="sourceLineNo">043</span>import org.apache.hadoop.hbase.client.Table;<a name="line.43"></a>
-<span class="sourceLineNo">044</span>import org.apache.hadoop.hbase.client.TableState;<a name="line.44"></a>
-<span class="sourceLineNo">045</span>import org.apache.hadoop.hbase.master.assignment.RegionStates;<a name="line.45"></a>
-<span class="sourceLineNo">046</span>import org.apache.hadoop.hbase.testclassification.MasterTests;<a name="line.46"></a>
-<span class="sourceLineNo">047</span>import org.apache.hadoop.hbase.testclassification.MediumTests;<a name="line.47"></a>
-<span class="sourceLineNo">048</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.48"></a>
-<span class="sourceLineNo">049</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.49"></a>
-<span class="sourceLineNo">050</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.50"></a>
-<span class="sourceLineNo">051</span>import org.apache.hadoop.util.StringUtils;<a name="line.51"></a>
-<span class="sourceLineNo">052</span>import org.junit.AfterClass;<a name="line.52"></a>
-<span class="sourceLineNo">053</span>import org.junit.BeforeClass;<a name="line.53"></a>
-<span class="sourceLineNo">054</span>import org.junit.ClassRule;<a name="line.54"></a>
-<span class="sourceLineNo">055</span>import org.junit.Rule;<a name="line.55"></a>
-<span class="sourceLineNo">056</span>import org.junit.Test;<a name="line.56"></a>
-<span class="sourceLineNo">057</span>import org.junit.experimental.categories.Category;<a name="line.57"></a>
-<span class="sourceLineNo">058</span>import org.junit.rules.TestName;<a name="line.58"></a>
-<span class="sourceLineNo">059</span>import org.slf4j.Logger;<a name="line.59"></a>
-<span class="sourceLineNo">060</span>import org.slf4j.LoggerFactory;<a name="line.60"></a>
-<span class="sourceLineNo">061</span><a name="line.61"></a>
-<span class="sourceLineNo">062</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.62"></a>
-<span class="sourceLineNo">063</span><a name="line.63"></a>
-<span class="sourceLineNo">064</span>@Category({MasterTests.class, MediumTests.class})<a name="line.64"></a>
-<span class="sourceLineNo">065</span>public class TestMaster {<a name="line.65"></a>
+<span class="sourceLineNo">022</span>import static org.junit.Assert.assertFalse;<a name="line.22"></a>
+<span class="sourceLineNo">023</span>import static org.junit.Assert.assertTrue;<a name="line.23"></a>
+<span class="sourceLineNo">024</span>import static org.junit.Assert.fail;<a name="line.24"></a>
+<span class="sourceLineNo">025</span><a name="line.25"></a>
+<span class="sourceLineNo">026</span>import java.io.IOException;<a name="line.26"></a>
+<span class="sourceLineNo">027</span>import java.util.List;<a name="line.27"></a>
+<span class="sourceLineNo">028</span>import java.util.Map;<a name="line.28"></a>
+<span class="sourceLineNo">029</span>import org.apache.hadoop.conf.Configuration;<a name="line.29"></a>
+<span class="sourceLineNo">030</span>import org.apache.hadoop.fs.FileSystem;<a name="line.30"></a>
+<span class="sourceLineNo">031</span>import org.apache.hadoop.fs.Path;<a name="line.31"></a>
+<span class="sourceLineNo">032</span>import org.apache.hadoop.hbase.HBaseClassTestRule;<a name="line.32"></a>
+<span class="sourceLineNo">033</span>import org.apache.hadoop.hbase.HBaseTestingUtility;<a name="line.33"></a>
+<span class="sourceLineNo">034</span>import org.apache.hadoop.hbase.HColumnDescriptor;<a name="line.34"></a>
+<span class="sourceLineNo">035</span>import org.apache.hadoop.hbase.HConstants;<a name="line.35"></a>
+<span class="sourceLineNo">036</span>import org.apache.hadoop.hbase.HTableDescriptor;<a name="line.36"></a>
+<span class="sourceLineNo">037</span>import org.apache.hadoop.hbase.MetaTableAccessor;<a name="line.37"></a>
+<span class="sourceLineNo">038</span>import org.apache.hadoop.hbase.MiniHBaseCluster;<a name="line.38"></a>
+<span class="sourceLineNo">039</span>import org.apache.hadoop.hbase.PleaseHoldException;<a name="line.39"></a>
+<span class="sourceLineNo">040</span>import org.apache.hadoop.hbase.ServerName;<a name="line.40"></a>
+<span class="sourceLineNo">041</span>import org.apache.hadoop.hbase.TableName;<a name="line.41"></a>
+<span class="sourceLineNo">042</span>import org.apache.hadoop.hbase.UnknownRegionException;<a name="line.42"></a>
+<span class="sourceLineNo">043</span>import org.apache.hadoop.hbase.client.Admin;<a name="line.43"></a>
+<span class="sourceLineNo">044</span>import org.apache.hadoop.hbase.client.RegionInfo;<a name="line.44"></a>
+<span class="sourceLineNo">045</span>import org.apache.hadoop.hbase.client.RegionInfoBuilder;<a name="line.45"></a>
+<span class="sourceLineNo">046</span>import org.apache.hadoop.hbase.client.Table;<a name="line.46"></a>
+<span class="sourceLineNo">047</span>import org.apache.hadoop.hbase.client.TableState;<a name="line.47"></a>
+<span class="sourceLineNo">048</span>import org.apache.hadoop.hbase.testclassification.MasterTests;<a name="line.48"></a>
+<span class="sourceLineNo">049</span>import org.apache.hadoop.hbase.testclassification.MediumTests;<a name="line.49"></a>
+<span class="sourceLineNo">050</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.50"></a>
+<span class="sourceLineNo">051</span>import org.apache.hadoop.hbase.util.HBaseFsck;<a name="line.51"></a>
+<span class="sourceLineNo">052</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.52"></a>
+<span class="sourceLineNo">053</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.53"></a>
+<span class="sourceLineNo">054</span>import org.apache.hadoop.util.StringUtils;<a name="line.54"></a>
+<span class="sourceLineNo">055</span>import org.junit.AfterClass;<a name="line.55"></a>
+<span class="sourceLineNo">056</span>import org.junit.BeforeClass;<a name="line.56"></a>
+<span class="sourceLineNo">057</span>import org.junit.ClassRule;<a name="line.57"></a>
+<span class="sourceLineNo">058</span>import org.junit.Rule;<a name="line.58"></a>
+<span class="sourceLineNo">059</span>import org.junit.Test;<a name="line.59"></a>
+<span class="sourceLineNo">060</span>import org.junit.experimental.categories.Category;<a name="line.60"></a>
+<span class="sourceLineNo">061</span>import org.junit.rules.TestName;<a name="line.61"></a>
+<span class="sourceLineNo">062</span>import org.slf4j.Logger;<a name="line.62"></a>
+<span class="sourceLineNo">063</span>import org.slf4j.LoggerFactory;<a name="line.63"></a>
+<span class="sourceLineNo">064</span><a name="line.64"></a>
+<span class="sourceLineNo">065</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.65"></a>
 <span class="sourceLineNo">066</span><a name="line.66"></a>
-<span class="sourceLineNo">067</span>  @ClassRule<a name="line.67"></a>
-<span class="sourceLineNo">068</span>  public static final HBaseClassTestRule CLASS_RULE =<a name="line.68"></a>
-<span class="sourceLineNo">069</span>      HBaseClassTestRule.forClass(TestMaster.class);<a name="line.69"></a>
-<span class="sourceLineNo">070</span><a name="line.70"></a>
-<span class="sourceLineNo">071</span>  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();<a name="line.71"></a>
-<span class="sourceLineNo">072</span>  private static final Logger LOG = LoggerFactory.getLogger(TestMaster.class);<a name="line.72"></a>
-<span class="sourceLineNo">073</span>  private static final TableName TABLENAME =<a name="line.73"></a>
-<span class="sourceLineNo">074</span>      TableName.valueOf("TestMaster");<a name="line.74"></a>
-<span class="sourceLineNo">075</span>  private static final byte[] FAMILYNAME = Bytes.toBytes("fam");<a name="line.75"></a>
-<span class="sourceLineNo">076</span>  private static Admin admin;<a name="line.76"></a>
-<span class="sourceLineNo">077</span><a name="line.77"></a>
-<span class="sourceLineNo">078</span>  @Rule<a name="line.78"></a>
-<span class="sourceLineNo">079</span>  public TestName name = new TestName();<a name="line.79"></a>
+<span class="sourceLineNo">067</span>@Category({MasterTests.class, MediumTests.class})<a name="line.67"></a>
+<span class="sourceLineNo">068</span>public class TestMaster {<a name="line.68"></a>
+<span class="sourceLineNo">069</span><a name="line.69"></a>
+<span class="sourceLineNo">070</span>  @ClassRule<a name="line.70"></a>
+<span class="sourceLineNo">071</span>  public static final HBaseClassTestRule CLASS_RULE =<a name="line.71"></a>
+<span class="sourceLineNo">072</span>      HBaseClassTestRule.forClass(TestMaster.class);<a name="line.72"></a>
+<span class="sourceLineNo">073</span><a name="line.73"></a>
+<span class="sourceLineNo">074</span>  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();<a name="line.74"></a>
+<span class="sourceLineNo">075</span>  private static final Logger LOG = LoggerFactory.getLogger(TestMaster.class);<a name="line.75"></a>
+<span class="sourceLineNo">076</span>  private static final TableName TABLENAME =<a name="line.76"></a>
+<span class="sourceLineNo">077</span>      TableName.valueOf("TestMaster");<a name="line.77"></a>
+<span class="sourceLineNo">078</span>  private static final byte[] FAMILYNAME = Bytes.toBytes("fam");<a name="line.78"></a>
+<span class="sourceLineNo">079</span>  private static Admin admin;<a name="line.79"></a>
 <span class="sourceLineNo">080</span><a name="line.80"></a>
-<span class="sourceLineNo">081</span>  @BeforeClass<a name="line.81"></a>
-<span class="sourceLineNo">082</span>  public static void beforeAllTests() throws Exception {<a name="line.82"></a>
-<span class="sourceLineNo">083</span>    // we will retry operations when PleaseHoldException is thrown<a name="line.83"></a>
-<span class="sourceLineNo">084</span>    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);<a name="line.84"></a>
-<span class="sourceLineNo">085</span>    // Start a cluster of two regionservers.<a name="line.85"></a>
-<span class="sourceLineNo">086</span>    TEST_UTIL.startMiniCluster(2);<a name="line.86"></a>
-<span class="sourceLineNo">087</span>    admin = TEST_UTIL.getAdmin();<a name="line.87"></a>
-<span class="sourceLineNo">088</span>  }<a name="line.88"></a>
-<span class="sourceLineNo">089</span><a name="line.89"></a>
-<span class="sourceLineNo">090</span>  @AfterClass<a name="line.90"></a>
-<span class="sourceLineNo">091</span>  public static void afterAllTests() throws Exception {<a name="line.91"></a>
-<span class="sourceLineNo">092</span>    TEST_UTIL.shutdownMiniCluster();<a name="line.92"></a>
-<span class="sourceLineNo">093</span>  }<a name="line.93"></a>
-<span class="sourceLineNo">094</span><a name="line.94"></a>
-<span class="sourceLineNo">095</span>  @Test<a name="line.95"></a>
-<span class="sourceLineNo">096</span>  @SuppressWarnings("deprecation")<a name="line.96"></a>
-<span class="sourceLineNo">097</span>  public void testMasterOpsWhileSplitting() throws Exception {<a name="line.97"></a>
-<span class="sourceLineNo">098</span>    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();<a name="line.98"></a>
-<span class="sourceLineNo">099</span>    HMaster m = cluster.getMaster();<a name="line.99"></a>
-<span class="sourceLineNo">100</span><a name="line.100"></a>
-<span class="sourceLineNo">101</span>    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {<a name="line.101"></a>
-<span class="sourceLineNo">102</span>      assertTrue(m.getTableStateManager().isTableState(TABLENAME, TableState.State.ENABLED));<a name="line.102"></a>
-<span class="sourceLineNo">103</span>      TEST_UTIL.loadTable(ht, FAMILYNAME, false);<a name="line.103"></a>
-<span class="sourceLineNo">104</span>    }<a name="line.104"></a>
-<span class="sourceLineNo">105</span><a name="line.105"></a>
-<span class="sourceLineNo">106</span>    List&lt;Pair&lt;RegionInfo, ServerName&gt;&gt; tableRegions = MetaTableAccessor.getTableRegionsAndLocations(<a name="line.106"></a>
-<span class="sourceLineNo">107</span>        m.getConnection(), TABLENAME);<a name="line.107"></a>
-<span class="sourceLineNo">108</span>    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));<a name="line.108"></a>
-<span class="sourceLineNo">109</span>    assertEquals(1, tableRegions.size());<a name="line.109"></a>
-<span class="sourceLineNo">110</span>    assertArrayEquals(HConstants.EMPTY_START_ROW,<a name="line.110"></a>
-<span class="sourceLineNo">111</span>        tableRegions.get(0).getFirst().getStartKey());<a name="line.111"></a>
-<span class="sourceLineNo">112</span>    assertArrayEquals(HConstants.EMPTY_END_ROW,<a name="line.112"></a>
-<span class="sourceLineNo">113</span>        tableRegions.get(0).getFirst().getEndKey());<a name="line.113"></a>
-<span class="sourceLineNo">114</span><a name="line.114"></a>
-<span class="sourceLineNo">115</span>    // Now trigger a split and stop when the split is in progress<a name="line.115"></a>
-<span class="sourceLineNo">116</span>    LOG.info("Splitting table");<a name="line.116"></a>
-<span class="sourceLineNo">117</span>    TEST_UTIL.getAdmin().split(TABLENAME);<a name="line.117"></a>
-<span class="sourceLineNo">118</span><a name="line.118"></a>
-<span class="sourceLineNo">119</span>    LOG.info("Making sure we can call getTableRegions while opening");<a name="line.119"></a>
-<span class="sourceLineNo">120</span>    while (tableRegions.size() &lt; 3) {<a name="line.120"></a>
-<span class="sourceLineNo">121</span>      tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(),<a name="line.121"></a>
-<span class="sourceLineNo">122</span>          TABLENAME, false);<a name="line.122"></a>
-<span class="sourceLineNo">123</span>      Thread.sleep(100);<a name="line.123"></a>
-<span class="sourceLineNo">124</span>    }<a name="line.124"></a>
-<span class="sourceLineNo">125</span>    LOG.info("Regions: " + Joiner.on(',').join(tableRegions));<a name="line.125"></a>
-<span class="sourceLineNo">126</span>    // We have three regions because one is split-in-progress<a name="line.126"></a>
-<span class="sourceLineNo">127</span>    assertEquals(3, tableRegions.size());<a name="line.127"></a>
-<span class="sourceLineNo">128</span>    LOG.info("Making sure we can call getTableRegionClosest while opening");<a name="line.128"></a>
-<span class="sourceLineNo">129</span>    Pair&lt;RegionInfo, ServerName&gt; pair =<a name="line.129"></a>
-<span class="sourceLineNo">130</span>        m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));<a name="line.130"></a>
-<span class="sourceLineNo">131</span>    LOG.info("Result is: " + pair);<a name="line.131"></a>
-<span class="sourceLineNo">132</span>    Pair&lt;RegionInfo, ServerName&gt; tableRegionFromName =<a name="line.132"></a>
-<span class="sourceLineNo">133</span>        MetaTableAccessor.getRegion(m.getConnection(),<a name="line.133"></a>
-<span class="sourceLineNo">134</span>          pair.getFirst().getRegionName());<a name="line.134"></a>
-<span class="sourceLineNo">135</span>    assertTrue(RegionInfo.COMPARATOR.compare(tableRegionFromName.getFirst(), pair.getFirst()) == 0);<a name="line.135"></a>
-<span class="sourceLineNo">136</span>  }<a name="line.136"></a>
-<span class="sourceLineNo">137</span><a name="line.137"></a>
-<span class="sourceLineNo">138</span>  @Test<a name="line.138"></a>
-<span class="sourceLineNo">139</span>  public void testMoveRegionWhenNotInitialized() {<a name="line.139"></a>
-<span class="sourceLineNo">140</span>    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();<a name="line.140"></a>
-<span class="sourceLineNo">141</span>    HMaster m = cluster.getMaster();<a name="line.141"></a>
-<span class="sourceLineNo">142</span>    try {<a name="line.142"></a>
-<span class="sourceLineNo">143</span>      m.setInitialized(false); // fake it, set back later<a name="line.143"></a>
-<span class="sourceLineNo">144</span>      RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO;<a name="line.144"></a>
-<span class="sourceLineNo">145</span>      m.move(meta.getEncodedNameAsBytes(), null);<a name="line.145"></a>
-<span class="sourceLineNo">146</span>      fail("Region should not be moved since master is not initialized");<a name="line.146"></a>
-<span class="sourceLineNo">147</span>    } catch (IOException ioe) {<a name="line.147"></a>
-<span class="sourceLineNo">148</span>      assertTrue(ioe instanceof PleaseHoldException);<a name="line.148"></a>
-<span class="sourceLineNo">149</span>    } finally {<a name="line.149"></a>
-<span class="sourceLineNo">150</span>      m.setInitialized(true);<a name="line.150"></a>
-<span class="sourceLineNo">151</span>    }<a name="line.151"></a>
-<span class="sourceLineNo">152</span>  }<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>  @Test<a name="line.154"></a>
-<span class="sourceLineNo">155</span>  public void testMoveThrowsUnknownRegionException() throws IOException {<a name="line.155"></a>
-<span class="sourceLineNo">156</span>    final TableName tableName = TableName.valueOf(name.getMethodName());<a name="line.156"></a>
-<span class="sourceLineNo">157</span>    HTableDescriptor htd = new HTableDescriptor(tableName);<a name="line.157"></a>
-<span class="sourceLineNo">158</span>    HColumnDescriptor hcd = new HColumnDescriptor("value");<a name="line.158"></a>
-<span class="sourceLineNo">159</span>    htd.addFamily(hcd);<a name="line.159"></a>
-<span class="sourceLineNo">160</span><a name="line.160"></a>
-<span class="sourceLineNo">161</span>    admin.createTable(htd, null);<a name="line.161"></a>
-<span class="sourceLineNo">162</span>    try {<a name="line.162"></a>
-<span class="sourceLineNo">163</span>      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)<a name="line.163"></a>
-<span class="sourceLineNo">164</span>          .setStartKey(Bytes.toBytes("A"))<a name="line.164"></a>
-<span class="sourceLineNo">165</span>          .setEndKey(Bytes.toBytes("Z"))<a name="line.165"></a>
-<span class="sourceLineNo">166</span>          .build();<a name="line.166"></a>
-<span class="sourceLineNo">167</span>      admin.move(hri.getEncodedNameAsBytes(), null);<a name="line.167"></a>
-<span class="sourceLineNo">168</span>      fail("Region should not be moved since it is fake");<a name="line.168"></a>
-<span class="sourceLineNo">169</span>    } catch (IOException ioe) {<a name="line.169"></a>
-<span class="sourceLineNo">170</span>      assertTrue(ioe instanceof UnknownRegionException);<a name="line.170"></a>
-<span class="sourceLineNo">171</span>    } finally {<a name="line.171"></a>
-<span class="sourceLineNo">172</span>      TEST_UTIL.deleteTable(tableName);<a name="line.172"></a>
-<span class="sourceLineNo">173</span>    }<a name="line.173"></a>
-<span class="sourceLineNo">174</span>  }<a name="line.174"></a>
-<span class="sourceLineNo">175</span><a name="line.175"></a>
-<span class="sourceLineNo">176</span>  @Test<a name="line.176"></a>
-<span class="sourceLineNo">177</span>  public void testMoveThrowsPleaseHoldException() throws IOException {<a name="line.177"></a>
-<span class="sourceLineNo">178</span>    final TableName tableName = TableName.valueOf(name.getMethodName());<a name="line.178"></a>
-<span class="sourceLineNo">179</span>    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();<a name="line.179"></a>
-<span class="sourceLineNo">180</span>    HTableDescriptor htd = new HTableDescriptor(tableName);<a name="line.180"></a>
-<span class="sourceLineNo">181</span>    HColumnDescriptor hcd = new HColumnDescriptor("value");<a name="line.181"></a>
-<span class="sourceLineNo">182</span>    htd.addFamily(hcd);<a name="line.182"></a>
-<span class="sourceLineNo">183</span><a name="line.183"></a>
-<span class="sourceLineNo">184</span>    admin.createTable(htd, null);<a name="line.184"></a>
-<span class="sourceLineNo">185</span>    try {<a name="line.185"></a>
-<span class="sourceLineNo">186</span>      List&lt;RegionInfo&gt; tableRegions = admin.getRegions(tableName);<a name="line.186"></a>
-<span class="sourceLineNo">187</span><a name="line.187"></a>
-<span class="sourceLineNo">188</span>      master.setInitialized(false); // fake it, set back later<a name="line.188"></a>
-<span class="sourceLineNo">189</span>      admin.move(tableRegions.get(0).getEncodedNameAsBytes(), null);<a name="line.189"></a>
-<span class="sourceLineNo">190</span>      fail("Region should not be moved since master is not initialized");<a name="line.190"></a>
-<span class="sourceLineNo">191</span>    } catch (IOException ioe) {<a name="line.191"></a>
-<span class="sourceLineNo">192</span>      assertTrue(StringUtils.stringifyException(ioe).contains("PleaseHoldException"));<a name="line.192"></a>
-<span class="sourceLineNo">193</span>    } finally {<a name="line.193"></a>
-<span class="sourceLineNo">194</span>      master.setInitialized(true);<a name="line.194"></a>
-<span class="sourceLineNo">195</span>      TEST_UTIL.deleteTable(tableName);<a name="line.195"></a>
-<span class="sourceLineNo">196</span>    }<a name="line.196"></a>
-<span class="sourceLineNo">197</span>  }<a name="line.197"></a>
-<span class="sourceLineNo">198</span><a name="line.198"></a>
-<span class="sourceLineNo">199</span>  @Test<a name="line.199"></a>
-<span class="sourceLineNo">200</span>  public void testFlushedSequenceIdPersistLoad() throws Exception {<a name="line.200"></a>
-<span class="sourceLineNo">201</span>    Configuration conf = TEST_UTIL.getConfiguration();<a name="line.201"></a>
-<span class="sourceLineNo">202</span>    int msgInterval = conf.getInt("hbase.regionserver.msginterval", 100);<a name="line.202"></a>
-<span class="sourceLineNo">203</span>    // insert some data into META<a name="line.203"></a>
-<span class="sourceLineNo">204</span>    TableName tableName = TableName.valueOf("testFlushSeqId");<a name="line.204"></a>
-<span class="sourceLineNo">205</span>    HTableDescriptor desc = new HTableDescriptor(tableName);<a name="line.205"></a>
-<span class="sourceLineNo">206</span>    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));<a name="line.206"></a>
-<span class="sourceLineNo">207</span>    Table table = TEST_UTIL.createTable(desc, null);<a name="line.207"></a>
-<span class="sourceLineNo">208</span>    // flush META region<a name="line.208"></a>
-<span class="sourceLineNo">209</span>    TEST_UTIL.flush(TableName.META_TABLE_NAME);<a name="line.209"></a>
-<span class="sourceLineNo">210</span>    // wait for regionserver report<a name="line.210"></a>
-<span class="sourceLineNo">211</span>    Threads.sleep(msgInterval * 2);<a name="line.211"></a>
-<span class="sourceLineNo">212</span>    // record flush seqid before cluster shutdown<a name="line.212"></a>
-<span class="sourceLineNo">213</span>    Map&lt;byte[], Long&gt; regionMapBefore =<a name="line.213"></a>
-<span class="sourceLineNo">214</span>        TEST_UTIL.getHBaseCluster().getMaster().getServerManager()<a name="line.214"></a>
-<span class="sourceLineNo">215</span>            .getFlushedSequenceIdByRegion();<a name="line.215"></a>
-<span class="sourceLineNo">216</span>    // restart hbase cluster which will cause flushed sequence id persist and reload<a name="line.216"></a>
-<span class="sourceLineNo">217</span>    TEST_UTIL.getMiniHBaseCluster().shutdown();<a name="line.217"></a>
-<span class="sourceLineNo">218</span>    TEST_UTIL.restartHBaseCluster(2);<a name="line.218"></a>
-<span class="sourceLineNo">219</span>    TEST_UTIL.waitUntilNoRegionsInTransition();<a name="line.219"></a>
-<span class="sourceLineNo">220</span>    // check equality after reloading flushed sequence id map<a name="line.220"></a>
-<span class="sourceLineNo">221</span>    Map&lt;byte[], Long&gt; regionMapAfter =<a name="line.221"></a>
-<span class="sourceLineNo">222</span>        TEST_UTIL.getHBaseCluster().getMaster().getServerManager()<a name="line.222"></a>
-<span class="sourceLineNo">223</span>            .getFlushedSequenceIdByRegion();<a name="line.223"></a>
-<span class="sourceLineNo">224</span>    assertTrue(regionMapBefore.equals(regionMapAfter));<a name="line.224"></a>
-<span class="sourceLineNo">225</span><a name="line.225"></a>
-<span class="sourceLineNo">226</span><a name="line.226"></a>
-<span class="sourceLineNo">227</span>  }<a name="line.227"></a>
-<span class="sourceLineNo">228</span>}<a name="line.228"></a>
+<span class="sourceLineNo">081</span>  @Rule<a name="line.81"></a>
+<span class="sourceLineNo">082</span>  public TestName name = new TestName();<a name="line.82"></a>
+<span class="sourceLineNo">083</span><a name="line.83"></a>
+<span class="sourceLineNo">084</span>  @BeforeClass<a name="line.84"></a>
+<span class="sourceLineNo">085</span>  public static void beforeAllTests() throws Exception {<a name="line.85"></a>
+<span class="sourceLineNo">086</span>    // we will retry operations when PleaseHoldException is thrown<a name="line.86"></a>
+<span class="sourceLineNo">087</span>    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);<a name="line.87"></a>
+<span class="sourceLineNo">088</span>    // Start a cluster of two regionservers.<a name="line.88"></a>
+<span class="sourceLineNo">089</span>    TEST_UTIL.startMiniCluster(2);<a name="line.89"></a>
+<span class="sourceLineNo">090</span>    admin = TEST_UTIL.getAdmin();<a name="line.90"></a>
+<span class="sourceLineNo">091</span>  }<a name="line.91"></a>
+<span class="sourceLineNo">092</span><a name="line.92"></a>
+<span class="sourceLineNo">093</span>  @AfterClass<a name="line.93"></a>
+<span class="sourceLineNo">094</span>  public static void afterAllTests() throws Exception {<a name="line.94"></a>
+<span class="sourceLineNo">095</span>    TEST_UTIL.shutdownMiniCluster();<a name="line.95"></a>
+<span class="sourceLineNo">096</span>  }<a name="line.96"></a>
+<span class="sourceLineNo">097</span><a name="line.97"></a>
+<span class="sourceLineNo">098</span>  @Test<a name="line.98"></a>
+<span class="sourceLineNo">099</span>  @SuppressWarnings("deprecation")<a name="line.99"></a>
+<span class="sourceLineNo">100</span>  public void testMasterOpsWhileSplitting() throws Exception {<a name="line.100"></a>
+<span class="sourceLineNo">101</span>    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();<a name="line.101"></a>
+<span class="sourceLineNo">102</span>    HMaster m = cluster.getMaster();<a name="line.102"></a>
+<span class="sourceLineNo">103</span><a name="line.103"></a>
+<span class="sourceLineNo">104</span>    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {<a name="line.104"></a>
+<span class="sourceLineNo">105</span>      assertTrue(m.getTableStateManager().isTableState(TABLENAME, TableState.State.ENABLED));<a name="line.105"></a>
+<span class="sourceLineNo">106</span>      TEST_UTIL.loadTable(ht, FAMILYNAME, false);<a name="line.106"></a>
+<span class="sourceLineNo">107</span>    }<a name="line.107"></a>
+<span class="sourceLineNo">108</span><a name="line.108"></a>
+<span class="sourceLineNo">109</span>    List&lt;Pair&lt;RegionInfo, ServerName&gt;&gt; tableRegions = MetaTableAccessor.getTableRegionsAndLocations(<a name="line.109"></a>
+<span class="sourceLineNo">110</span>        m.getConnection(), TABLENAME);<a name="line.110"></a>
+<span class="sourceLineNo">111</span>    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));<a name="line.111"></a>
+<span class="sourceLineNo">112</span>    assertEquals(1, tableRegions.size());<a name="line.112"></a>
+<span class="sourceLineNo">113</span>    assertArrayEquals(HConstants.EMPTY_START_ROW,<a name="line.113"></a>
+<span class="sourceLineNo">114</span>        tableRegions.get(0).getFirst().getStartKey());<a name="line.114"></a>
+<span class="sourceLineNo">115</span>    assertArrayEquals(HConstants.EMPTY_END_ROW,<a name="line.115"></a>
+<span class="sourceLineNo">116</span>        tableRegions.get(0).getFirst().getEndKey());<a name="line.116"></a>
+<span class="sourceLineNo">117</span><a name="line.117"></a>
+<span class="sourceLineNo">118</span>    // Now trigger a split and stop when the split is in progress<a name="line.118"></a>
+<span class="sourceLineNo">119</span>    LOG.info("Splitting table");<a name="line.119"></a>
+<span class="sourceLineNo">120</span>    TEST_UTIL.getAdmin().split(TABLENAME);<a name="line.120"></a>
+<span class="sourceLineNo">121</span><a name="line.121"></a>
+<span class="sourceLineNo">122</span>    LOG.info("Making sure we can call getTableRegions while opening");<a name="line.122"></a>
+<span class="sourceLineNo">123</span>    while (tableRegions.size() &lt; 3) {<a name="line.123"></a>
+<span class="sourceLineNo">124</span>      tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(),<a name="line.124"></a>
+<span class="sourceLineNo">125</span>          TABLENAME, false);<a name="line.125"></a>
+<span class="sourceLineNo">126</span>      Thread.sleep(100);<a name="line.126"></a>
+<span class="sourceLineNo">127</span>    }<a name="line.127"></a>
+<span class="sourceLineNo">128</span>    LOG.info("Regions: " + Joiner.on(',').join(tableRegions));<a name="line.128"></a>
+<span class="sourceLineNo">129</span>    // We have three regions because one is split-in-progress<a name="line.129"></a>
+<span class="sourceLineNo">130</span>    assertEquals(3, tableRegions.size());<a name="line.130"></a>
+<span class="sourceLineNo">131</span>    LOG.info("Making sure we can call getTableRegionClosest while opening");<a name="line.131"></a>
+<span class="sourceLineNo">132</span>    Pair&lt;RegionInfo, ServerName&gt; pair =<a name="line.132"></a>
+<span class="sourceLineNo">133</span>        m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));<a name="line.133"></a>
+<span class="sourceLineNo">134</span>    LOG.info("Result is: " + pair);<a name="line.134"></a>
+<span class="sourceLineNo">135</span>    Pair&lt;RegionInfo, ServerName&gt; tableRegionFromName =<a name="line.135"></a>
+<span class="sourceLineNo">136</span>        MetaTableAccessor.getRegion(m.getConnection(),<a name="line.136"></a>
+<span class="sourceLineNo">137</span>          pair.getFirst().getRegionName());<a name="line.137"></a>
+<span class="sourceLineNo">138</span>    assertTrue(RegionInfo.COMPARATOR.compare(tableRegionFromName.getFirst(), pair.getFirst()) == 0);<a name="line.138"></a>
+<span class="sourceLineNo">139</span>  }<a name="line.139"></a>
+<span class="sourceLineNo">140</span><a name="line.140"></a>
+<span class="sourceLineNo">141</span>  @Test<a name="line.141"></a>
+<span class="sourceLineNo">142</span>  public void testMoveRegionWhenNotInitialized() {<a name="line.142"></a>
+<span class="sourceLineNo">143</span>    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();<a name="line.143"></a>
+<span class="sourceLineNo">144</span>    HMaster m = cluster.getMaster();<a name="line.144"></a>
+<span class="sourceLineNo">145</span>    try {<a name="line.145"></a>
+<span class="sourceLineNo">146</span>      m.setInitialized(false); // fake it, set back later<a name="line.146"></a>
+<span class="sourceLineNo">147</span>      RegionInfo meta = RegionInfoBuilder.FIRST_META_REGIONINFO;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>      m.move(meta.getEncodedNameAsBytes(), null);<a name="line.148"></a>
+<span class="sourceLineNo">149</span>      fail("Region should not be moved since master is not initialized");<a name="line.149"></a>
+<span class="sourceLineNo">150</span>    } catch (IOException ioe) {<a name="line.150"></a>
+<span class="sourceLineNo">151</span>      assertTrue(ioe instanceof PleaseHoldException);<a name="line.151"></a>
+<span class="sourceLineNo">152</span>    } finally {<a name="line.152"></a>
+<span class="sourceLineNo">153</span>      m.setInitialized(true);<a name="line.153"></a>
+<span class="sourceLineNo">154</span>    }<a name="line.154"></a>
+<span class="sourceLineNo">155</span>  }<a name="line.155"></a>
+<span class="sourceLineNo">156</span><a name="line.156"></a>
+<span class="sourceLineNo">157</span>  @Test<a name="line.157"></a>
+<span class="sourceLineNo">158</span>  public void testMoveThrowsUnknownRegionException() throws IOException {<a name="line.158"></a>
+<span class="sourceLineNo">159</span>    final TableName tableName = TableName.valueOf(name.getMethodName());<a name="line.159"></a>
+<span class="sourceLineNo">160</span>    HTableDescriptor htd = new HTableDescriptor(tableName);<a name="line.160"></a>
+<span class="sourceLineNo">161</span>    HColumnDescriptor hcd = new HColumnDescriptor("value");<a name="line.161"></a>
+<span class="sourceLineNo">162</span>    htd.addFamily(hcd);<a name="line.162"></a>
+<span class="sourceLineNo">163</span><a name="line.163"></a>
+<span class="sourceLineNo">164</span>    admin.createTable(htd, null);<a name="line.164"></a>
+<span class="sourceLineNo">165</span>    try {<a name="line.165"></a>
+<span class="sourceLineNo">166</span>      RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)<a name="line.166"></a>
+<span class="sourceLineNo">167</span>          .setStartKey(Bytes.toBytes("A"))<a name="line.167"></a>
+<span class="sourceLineNo">168</span>          .setEndKey(Bytes.toBytes("Z"))<a name="line.168"></a>
+<span class="sourceLineNo">169</span>          .build();<a name="line.169"></a>
+<span class="sourceLineNo">170</span>      admin.move(hri.getEncodedNameAsBytes(), null);<a name="line.170"></a>
+<span class="sourceLineNo">171</span>      fail("Region should not be moved since it is fake");<a name="line.171"></a>
+<span class="sourceLineNo">172</span>    } catch (IOException ioe) {<a name="line.172"></a>
+<span class="sourceLineNo">173</span>      assertTrue(ioe instanceof UnknownRegionException);<a name="line.173"></a>
+<span class="sourceLineNo">174</span>    } finally {<a name="line.174"></a>
+<span class="sourceLineNo">175</span>      TEST_UTIL.deleteTable(tableName);<a name="line.175"></a>
+<span class="sourceLineNo">176</span>    }<a name="line.176"></a>
+<span class="sourceLineNo">177</span>  }<a name="line.177"></a>
+<span class="sourceLineNo">178</span><a name="line.178"></a>
+<span class="sourceLineNo">179</span>  @Test<a name="line.179"></a>
+<span class="sourceLineNo">180</span>  public void testMoveThrowsPleaseHoldException() throws IOException {<a name="line.180"></a>
+<span class="sourceLineNo">181</span>    final TableName tableName = TableName.valueOf(name.getMethodName());<a name="line.181"></a>
+<span class="sourceLineNo">182</span>    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();<a name="line.182"></a>
+<span class="sourceLineNo">183</span>    HTableDescriptor htd = new HTableDescriptor(tableName);<a name="line.183"></a>
+<span class="sourceLineNo">184</span>    HColumnDescriptor hcd = new HColumnDescriptor("value");<a name="line.184"></a>
+<span class="sourceLineNo">185</span>    htd.addFamily(hcd);<a name="line.185"></a>
+<span class="sourceLineNo">186</span><a name="line.186"></a>
+<span class="sourceLineNo">187</span>    admin.createTable(htd, null);<a name="line.187"></a>
+<span class="sourceLineNo">188</span>    try {<a name="line.188"></a>
+<span class="sourceLineNo">189</span>      List&lt;RegionInfo&gt; tableRegions = admin.getRegions(tableName);<a name="line.189"></a>
+<span class="sourceLineNo">190</span><a name="line.190"></a>
+<span class="sourceLineNo">191</span>      master.setInitialized(false); // fake it, set back later<a name="line.191"></a>
+<span class="sourceLineNo">192</span>      admin.move(tableRegions.get(0).getEncodedNameAsBytes(), null);<a name="line.192"></a>
+<span class="sourceLineNo">193</span>      fail("Region should not be moved since master is not initialized");<a name="line.193"></a>
+<span class="sourceLineNo">194</span>    } catch (IOException ioe) {<a name="line.194"></a>
+<span class="sourceLineNo">195</span>      assertTrue(StringUtils.stringifyException(ioe).contains("PleaseHoldException"));<a name="line.195"></a>
+<span class="sourceLineNo">196</span>    } finally {<a name="line.196"></a>
+<span class="sourceLineNo">197</span>      master.setInitialized(true);<a name="line.197"></a>
+<span class="sourceLineNo">198</span>      TEST_UTIL.deleteTable(tableName);<a name="line.198"></a>
+<span class="sourceLineNo">199</span>    }<a name="line.199"></a>
+<span class="sourceLineNo">200</span>  }<a name="line.200"></a>
+<span class="sourceLineNo">201</span><a name="line.201"></a>
+<span class="sourceLineNo">202</span>  @Test<a name="line.202"></a>
+<span class="sourceLineNo">203</span>  public void testFlushedSequenceIdPersistLoad() throws Exception {<a name="line.203"></a>
+<span class="sourceLineNo">204</span>    Configuration conf = TEST_UTIL.getConfiguration();<a name="line.204"></a>
+<span class="sourceLineNo">205</span>    int msgInterval = conf.getInt("hbase.regionserver.msginterval", 100);<a name="line.205"></a>
+<span class="sourceLineNo">206</span>    // insert some data into META<a name="line.206"></a>
+<span class="sourceLineNo">207</span>    TableName tableName = TableName.valueOf("testFlushSeqId");<a name="line.207"></a>
+<span class="sourceLineNo">208</span>    HTableDescriptor desc = new HTableDescriptor(tableName);<a name="line.208"></a>
+<span class="sourceLineNo">209</span>    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));<a name="line.209"></a>
+<span class="sourceLineNo">210</span>    Table table = TEST_UTIL.createTable(desc, null);<a name="line.210"></a>
+<span class="sourceLineNo">211</span>    // flush META region<a name="line.211"></a>
+<span class="sourceLineNo">212</span>    TEST_UTIL.flush(TableName.META_TABLE_NAME);<a name="line.212"></a>
+<span class="sourceLineNo">213</span>    // wait for regionserver report<a name="line.213"></a>
+<span class="sourceLineNo">214</span>    Threads.sleep(msgInterval * 2);<a name="line.214"></a>
+<span class="sourceLineNo">215</span>    // record flush seqid before cluster shutdown<a name="line.215"></a>
+<span class="sourceLineNo">216</span>    Map&lt;byte[], Long&gt; regionMapBefore =<a name="line.216"></a>
+<span class="sourceLineNo">217</span>        TEST_UTIL.getHBaseCluster().getMaster().getServerManager()<a name="line.217"></a>
+<span class="sourceLineNo">218</span>            .getFlushedSequenceIdByRegion();<a name="line.218"></a>
+<span class="sourceLineNo">219</span>    // restart hbase cluster which will cause flushed sequence id persist and reload<a name="line.219"></a>
+<span class="sourceLineNo">220</span>    TEST_UTIL.getMiniHBaseCluster().shutdown();<a name="line.220"></a>
+<span class="sourceLineNo">221</span>    TEST_UTIL.restartHBaseCluster(2);<a name="line.221"></a>
+<span class="sourceLineNo">222</span>    TEST_UTIL.waitUntilNoRegionsInTransition();<a name="line.222"></a>
+<span class="sourceLineNo">223</span>    // check equality after reloading flushed sequence id map<a name="line.223"></a>
+<span class="sourceLineNo">224</span>    Map&lt;byte[], Long&gt; regionMapAfter =<a name="line.224"></a>
+<span class="sourceLineNo">225</span>        TEST_UTIL.getHBaseCluster().getMaster().getServerManager()<a name="line.225"></a>
+<span class="sourceLineNo">226</span>            .getFlushedSequenceIdByRegion();<a name="line.226"></a>
+<span class="sourceLineNo">227</span>    assertTrue(regionMapBefore.equals(regionMapAfter));<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  }<a name="line.228"></a>
 <span class="sourceLineNo">229</span><a name="line.229"></a>
+<span class="sourceLineNo">230</span>  @Test<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  public void testBlockingHbkc1WithLockFile() throws IOException {<a name="line.231"></a>
+<span class="sourceLineNo">232</span>    // This is how the patch to the lock file is created inside in HBaseFsck. Too hard to use its<a name="line.232"></a>
+<span class="sourceLineNo">233</span>    // actual method without disturbing HBaseFsck... Do the below mimic instead.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>    Path hbckLockPath = new Path(HBaseFsck.getTmpDir(TEST_UTIL.getConfiguration()),<a name="line.234"></a>
+<span class="sourceLineNo">235</span>        HBaseFsck.HBCK_LOCK_FILE);<a name="line.235"></a>
+<span class="sourceLineNo">236</span>    FileSystem fs = TEST_UTIL.getTestFileSystem();<a name="line.236"></a>
+<span class="sourceLineNo">237</span>    assertTrue(fs.exists(hbckLockPath));<a name="line.237"></a>
+<span class="sourceLineNo">238</span>    TEST_UTIL.getMiniHBaseCluster().<a name="line.238"></a>
+<span class="sourceLineNo">239</span>        killMaster(TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName());<a name="line.239"></a>
+<span class="sourceLineNo">240</span>    assertTrue(fs.exists(hbckLockPath));<a name="line.240"></a>
+<span class="sourceLineNo">241</span>    TEST_UTIL.getMiniHBaseCluster().startMaster();<a name="line.241"></a>
+<span class="sourceLineNo">242</span>    TEST_UTIL.waitFor(30000, () -&gt; TEST_UTIL.getMiniHBaseCluster().getMaster() != null &amp;&amp;<a name="line.242"></a>
+<span class="sourceLineNo">243</span>        TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());<a name="line.243"></a>
+<span class="sourceLineNo">244</span>    assertTrue(fs.exists(hbckLockPath));<a name="line.244"></a>
+<span class="sourceLineNo">245</span>    // Start a second Master. Should be fine.<a name="line.245"></a>
+<span class="sourceLineNo">246</span>    TEST_UTIL.getMiniHBaseCluster().startMaster();<a name="line.246"></a>
+<span class="sourceLineNo">247</span>    assertTrue(fs.exists(hbckLockPath));<a name="line.247"></a>
+<span class="sourceLineNo">248</span>    fs.delete(hbckLockPath, true);<a name="line.248"></a>
+<span class="sourceLineNo">249</span>    assertFalse(fs.exists(hbckLockPath));<a name="line.249"></a>
+<span class="sourceLineNo">250</span>    // Kill all Masters.<a name="line.250"></a>
+<span class="sourceLineNo">251</span>    TEST_UTIL.getMiniHBaseCluster().getLiveMasterThreads().stream().<a name="line.251"></a>
+<span class="sourceLineNo">252</span>        map(sn -&gt; sn.getMaster().getServerName()).forEach(sn -&gt; {<a name="line.252"></a>
+<span class="sourceLineNo">253</span>          try {<a name="line.253"></a>
+<span class="sourceLineNo">254</span>            TEST_UTIL.getMiniHBaseCluster().killMaster(sn);<a name="line.254"></a>
+<span class="sourceLineNo">255</span>          } catch (IOException e) {<a name="line.255"></a>
+<span class="sourceLineNo">256</span>            e.printStackTrace();<a name="line.256"></a>
+<span class="sourceLineNo">257</span>          }<a name="line.257"></a>
+<span class="sourceLineNo">258</span>        });<a name="line.258"></a>
+<span class="sourceLineNo">259</span>    // Start a new one.<a name="line.259"></a>
+<span class="sourceLineNo">260</span>    TEST_UTIL.getMiniHBaseCluster().startMaster();<a name="line.260"></a>
+<span class="sourceLineNo">261</span>    TEST_UTIL.waitFor(30000, () -&gt; TEST_UTIL.getMiniHBaseCluster().getMaster() != null &amp;&amp;<a name="line.261"></a>
+<span class="sourceLineNo">262</span>        TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());<a name="line.262"></a>
+<span class="sourceLineNo">263</span>    // Assert lock gets put in place again.<a name="line.263"></a>
+<span class="sourceLineNo">264</span>    assertTrue(fs.exists(hbckLockPath));<a name="line.264"></a>
+<span class="sourceLineNo">265</span>  }<a name="line.265"></a>
+<span class="sourceLineNo">266</span>}<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
 
 
 


[34/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 8b16208..2815504 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 <hr>
 <br>
 <pre>@InterfaceAudience.LimitedPrivate(value="Tools")
-public class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.249">HMaster</a>
+public class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.250">HMaster</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a>
 implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></pre>
 <div class="block">HMaster is the "master server" for HBase. An HBase cluster has one active
@@ -1515,7 +1515,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>LOG</h4>
-<pre>private static&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.250">LOG</a></pre>
+<pre>private static&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.251">LOG</a></pre>
 </li>
 </ul>
 <a name="MASTER">
@@ -1524,7 +1524,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>MASTER</h4>
-<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.306">MASTER</a></pre>
+<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.307">MASTER</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.master.HMaster.MASTER">Constant Field Values</a></dd>
@@ -1537,7 +1537,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>activeMasterManager</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ActiveMasterManager.html" title="class in org.apache.hadoop.hbase.master">ActiveMasterManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.309">activeMasterManager</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ActiveMasterManager.html" title="class in org.apache.hadoop.hbase.master">ActiveMasterManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.310">activeMasterManager</a></pre>
 </li>
 </ul>
 <a name="regionServerTracker">
@@ -1546,7 +1546,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>regionServerTracker</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/RegionServerTracker.html" title="class in org.apache.hadoop.hbase.master">RegionServerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.311">regionServerTracker</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/RegionServerTracker.html" title="class in org.apache.hadoop.hbase.master">RegionServerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.312">regionServerTracker</a></pre>
 </li>
 </ul>
 <a name="drainingServerTracker">
@@ -1555,7 +1555,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>drainingServerTracker</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/DrainingServerTracker.html" title="class in org.apache.hadoop.hbase.master">DrainingServerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.313">drainingServerTracker</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/DrainingServerTracker.html" title="class in org.apache.hadoop.hbase.master">DrainingServerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.314">drainingServerTracker</a></pre>
 </li>
 </ul>
 <a name="loadBalancerTracker">
@@ -1564,7 +1564,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>loadBalancerTracker</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.html" title="class in org.apache.hadoop.hbase.zookeeper">LoadBalancerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.315">loadBalancerTracker</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.html" title="class in org.apache.hadoop.hbase.zookeeper">LoadBalancerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.316">loadBalancerTracker</a></pre>
 </li>
 </ul>
 <a name="metaLocationSyncer">
@@ -1573,7 +1573,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>metaLocationSyncer</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.html" title="class in org.apache.hadoop.hbase.master.zksyncer">MetaLocationSyncer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.317">metaLocationSyncer</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/master/zksyncer/MetaLocationSyncer.html" title="class in org.apache.hadoop.hbase.master.zksyncer">MetaLocationSyncer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.318">metaLocationSyncer</a></pre>
 </li>
 </ul>
 <a name="masterAddressSyncer">
@@ -1582,7 +1582,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>masterAddressSyncer</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.html" title="class in org.apache.hadoop.hbase.master.zksyncer">MasterAddressSyncer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.319">masterAddressSyncer</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/master/zksyncer/MasterAddressSyncer.html" title="class in org.apache.hadoop.hbase.master.zksyncer">MasterAddressSyncer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.320">masterAddressSyncer</a></pre>
 </li>
 </ul>
 <a name="splitOrMergeTracker">
@@ -1591,7 +1591,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>splitOrMergeTracker</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/SplitOrMergeTracker.html" title="class in org.apache.hadoop.hbase.master">SplitOrMergeTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.322">splitOrMergeTracker</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/SplitOrMergeTracker.html" title="class in org.apache.hadoop.hbase.master">SplitOrMergeTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.323">splitOrMergeTracker</a></pre>
 </li>
 </ul>
 <a name="regionNormalizerTracker">
@@ -1600,7 +1600,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>regionNormalizerTracker</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.html" title="class in org.apache.hadoop.hbase.zookeeper">RegionNormalizerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.325">regionNormalizerTracker</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.html" title="class in org.apache.hadoop.hbase.zookeeper">RegionNormalizerTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.326">regionNormalizerTracker</a></pre>
 </li>
 </ul>
 <a name="maintenanceModeTracker">
@@ -1609,7 +1609,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>maintenanceModeTracker</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.html" title="class in org.apache.hadoop.hbase.zookeeper">MasterMaintenanceModeTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.328">maintenanceModeTracker</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.html" title="class in org.apache.hadoop.hbase.zookeeper">MasterMaintenanceModeTracker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.329">maintenanceModeTracker</a></pre>
 </li>
 </ul>
 <a name="clusterSchemaService">
@@ -1618,7 +1618,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>clusterSchemaService</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ClusterSchemaService.html" title="interface in org.apache.hadoop.hbase.master">ClusterSchemaService</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.330">clusterSchemaService</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ClusterSchemaService.html" title="interface in org.apache.hadoop.hbase.master">ClusterSchemaService</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.331">clusterSchemaService</a></pre>
 </li>
 </ul>
 <a name="HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS">
@@ -1627,7 +1627,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS</h4>
-<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.332">HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS</a></pre>
+<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.333">HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.master.HMaster.HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS">Constant Field Values</a></dd>
@@ -1640,7 +1640,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS</h4>
-<pre>public static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.334">DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS</a></pre>
+<pre>public static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.335">DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.master.HMaster.DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS">Constant Field Values</a></dd>
@@ -1653,7 +1653,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>metricsMaster</h4>
-<pre>final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMaster.html" title="class in org.apache.hadoop.hbase.master">MetricsMaster</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.337">metricsMaster</a></pre>
+<pre>final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMaster.html" title="class in org.apache.hadoop.hbase.master">MetricsMaster</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.338">metricsMaster</a></pre>
 </li>
 </ul>
 <a name="fileSystemManager">
@@ -1662,7 +1662,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>fileSystemManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterFileSystem.html" title="class in org.apache.hadoop.hbase.master">MasterFileSystem</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.339">fileSystemManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterFileSystem.html" title="class in org.apache.hadoop.hbase.master">MasterFileSystem</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.340">fileSystemManager</a></pre>
 </li>
 </ul>
 <a name="walManager">
@@ -1671,7 +1671,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>walManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterWalManager.html" title="class in org.apache.hadoop.hbase.master">MasterWalManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.340">walManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterWalManager.html" title="class in org.apache.hadoop.hbase.master">MasterWalManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.341">walManager</a></pre>
 </li>
 </ul>
 <a name="serverManager">
@@ -1680,7 +1680,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>serverManager</h4>
-<pre>private volatile&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master">ServerManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.343">serverManager</a></pre>
+<pre>private volatile&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master">ServerManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.344">serverManager</a></pre>
 </li>
 </ul>
 <a name="assignmentManager">
@@ -1689,7 +1689,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>assignmentManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/assignment/AssignmentManager.html" title="class in org.apache.hadoop.hbase.master.assignment">AssignmentManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.346">assignmentManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/assignment/AssignmentManager.html" title="class in org.apache.hadoop.hbase.master.assignment">AssignmentManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.347">assignmentManager</a></pre>
 </li>
 </ul>
 <a name="replicationPeerManager">
@@ -1698,7 +1698,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>replicationPeerManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html" title="class in org.apache.hadoop.hbase.master.replication">ReplicationPeerManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.349">replicationPeerManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html" title="class in org.apache.hadoop.hbase.master.replication">ReplicationPeerManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.350">replicationPeerManager</a></pre>
 </li>
 </ul>
 <a name="syncReplicationReplayWALManager">
@@ -1707,7 +1707,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>syncReplicationReplayWALManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html" title="class in org.apache.hadoop.hbase.master.replication">SyncReplicationReplayWALManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.351">syncReplicationReplayWALManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html" title="class in org.apache.hadoop.hbase.master.replication">SyncReplicationReplayWALManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.352">syncReplicationReplayWALManager</a></pre>
 </li>
 </ul>
 <a name="rsFatals">
@@ -1716,7 +1716,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>rsFatals</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.html" title="class in org.apache.hadoop.hbase.monitoring">MemoryBoundedLogMessageBuffer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.356">rsFatals</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.html" title="class in org.apache.hadoop.hbase.monitoring">MemoryBoundedLogMessageBuffer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.357">rsFatals</a></pre>
 </li>
 </ul>
 <a name="activeMaster">
@@ -1725,7 +1725,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>activeMaster</h4>
-<pre>private volatile&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.359">activeMaster</a></pre>
+<pre>private volatile&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.360">activeMaster</a></pre>
 </li>
 </ul>
 <a name="initialized">
@@ -1734,7 +1734,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>initialized</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure2/ProcedureEvent.html" title="class in org.apache.hadoop.hbase.procedure2">ProcedureEvent</a>&lt;?&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.362">initialized</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure2/ProcedureEvent.html" title="class in org.apache.hadoop.hbase.procedure2">ProcedureEvent</a>&lt;?&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.363">initialized</a></pre>
 </li>
 </ul>
 <a name="serviceStarted">
@@ -1743,7 +1743,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>serviceStarted</h4>
-<pre>volatile&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.366">serviceStarted</a></pre>
+<pre>volatile&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.367">serviceStarted</a></pre>
 </li>
 </ul>
 <a name="maxBlancingTime">
@@ -1752,7 +1752,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>maxBlancingTime</h4>
-<pre>private final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.369">maxBlancingTime</a></pre>
+<pre>private final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.370">maxBlancingTime</a></pre>
 </li>
 </ul>
 <a name="maxRitPercent">
@@ -1761,7 +1761,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>maxRitPercent</h4>
-<pre>private final&nbsp;double <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.371">maxRitPercent</a></pre>
+<pre>private final&nbsp;double <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.372">maxRitPercent</a></pre>
 </li>
 </ul>
 <a name="lockManager">
@@ -1770,7 +1770,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>lockManager</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/locking/LockManager.html" title="class in org.apache.hadoop.hbase.master.locking">LockManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.373">lockManager</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/locking/LockManager.html" title="class in org.apache.hadoop.hbase.master.locking">LockManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.374">lockManager</a></pre>
 </li>
 </ul>
 <a name="balancer">
@@ -1779,7 +1779,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>balancer</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/LoadBalancer.html" title="interface in org.apache.hadoop.hbase.master">LoadBalancer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.375">balancer</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/LoadBalancer.html" title="interface in org.apache.hadoop.hbase.master">LoadBalancer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.376">balancer</a></pre>
 </li>
 </ul>
 <a name="normalizer">
@@ -1788,7 +1788,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>normalizer</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html" title="interface in org.apache.hadoop.hbase.master.normalizer">RegionNormalizer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.376">normalizer</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.html" title="interface in org.apache.hadoop.hbase.master.normalizer">RegionNormalizer</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.377">normalizer</a></pre>
 </li>
 </ul>
 <a name="balancerChore">
@@ -1797,7 +1797,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>balancerChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/balancer/BalancerChore.html" title="class in org.apache.hadoop.hbase.master.balancer">BalancerChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.377">balancerChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/balancer/BalancerChore.html" title="class in org.apache.hadoop.hbase.master.balancer">BalancerChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.378">balancerChore</a></pre>
 </li>
 </ul>
 <a name="normalizerChore">
@@ -1806,7 +1806,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>normalizerChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.html" title="class in org.apache.hadoop.hbase.master.normalizer">RegionNormalizerChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.378">normalizerChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.html" title="class in org.apache.hadoop.hbase.master.normalizer">RegionNormalizerChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.379">normalizerChore</a></pre>
 </li>
 </ul>
 <a name="clusterStatusChore">
@@ -1815,7 +1815,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>clusterStatusChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.html" title="class in org.apache.hadoop.hbase.master.balancer">ClusterStatusChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.379">clusterStatusChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.html" title="class in org.apache.hadoop.hbase.master.balancer">ClusterStatusChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.380">clusterStatusChore</a></pre>
 </li>
 </ul>
 <a name="clusterStatusPublisherChore">
@@ -1824,7 +1824,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>clusterStatusPublisherChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ClusterStatusPublisher.html" title="class in org.apache.hadoop.hbase.master">ClusterStatusPublisher</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.380">clusterStatusPublisherChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ClusterStatusPublisher.html" title="class in org.apache.hadoop.hbase.master">ClusterStatusPublisher</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.381">clusterStatusPublisherChore</a></pre>
 </li>
 </ul>
 <a name="catalogJanitorChore">
@@ -1833,7 +1833,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>catalogJanitorChore</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/master/CatalogJanitor.html" title="class in org.apache.hadoop.hbase.master">CatalogJanitor</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.382">catalogJanitorChore</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/master/CatalogJanitor.html" title="class in org.apache.hadoop.hbase.master">CatalogJanitor</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.383">catalogJanitorChore</a></pre>
 </li>
 </ul>
 <a name="logCleaner">
@@ -1842,7 +1842,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>logCleaner</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/cleaner/LogCleaner.html" title="class in org.apache.hadoop.hbase.master.cleaner">LogCleaner</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.383">logCleaner</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/cleaner/LogCleaner.html" title="class in org.apache.hadoop.hbase.master.cleaner">LogCleaner</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.384">logCleaner</a></pre>
 </li>
 </ul>
 <a name="hfileCleaner">
@@ -1851,7 +1851,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>hfileCleaner</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/cleaner/HFileCleaner.html" title="class in org.apache.hadoop.hbase.master.cleaner">HFileCleaner</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.384">hfileCleaner</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/cleaner/HFileCleaner.html" title="class in org.apache.hadoop.hbase.master.cleaner">HFileCleaner</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.385">hfileCleaner</a></pre>
 </li>
 </ul>
 <a name="replicationBarrierCleaner">
@@ -1860,7 +1860,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>replicationBarrierCleaner</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.html" title="class in org.apache.hadoop.hbase.master.cleaner">ReplicationBarrierCleaner</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.385">replicationBarrierCleaner</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.html" title="class in org.apache.hadoop.hbase.master.cleaner">ReplicationBarrierCleaner</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.386">replicationBarrierCleaner</a></pre>
 </li>
 </ul>
 <a name="expiredMobFileCleanerChore">
@@ -1869,7 +1869,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>expiredMobFileCleanerChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.html" title="class in org.apache.hadoop.hbase.master">ExpiredMobFileCleanerChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.386">expiredMobFileCleanerChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.html" title="class in org.apache.hadoop.hbase.master">ExpiredMobFileCleanerChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.387">expiredMobFileCleanerChore</a></pre>
 </li>
 </ul>
 <a name="mobCompactChore">
@@ -1878,7 +1878,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>mobCompactChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MobCompactionChore.html" title="class in org.apache.hadoop.hbase.master">MobCompactionChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.387">mobCompactChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MobCompactionChore.html" title="class in org.apache.hadoop.hbase.master">MobCompactionChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.388">mobCompactChore</a></pre>
 </li>
 </ul>
 <a name="mobCompactThread">
@@ -1887,7 +1887,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>mobCompactThread</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterMobCompactionThread.html" title="class in org.apache.hadoop.hbase.master">MasterMobCompactionThread</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.388">mobCompactThread</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterMobCompactionThread.html" title="class in org.apache.hadoop.hbase.master">MasterMobCompactionThread</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.389">mobCompactThread</a></pre>
 </li>
 </ul>
 <a name="mobCompactionLock">
@@ -1896,7 +1896,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>mobCompactionLock</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/IdLock.html" title="class in org.apache.hadoop.hbase.util">IdLock</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.390">mobCompactionLock</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/IdLock.html" title="class in org.apache.hadoop.hbase.util">IdLock</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.391">mobCompactionLock</a></pre>
 </li>
 </ul>
 <a name="mobCompactionStates">
@@ -1905,7 +1905,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>mobCompactionStates</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicInteger</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.393">mobCompactionStates</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicInteger</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.394">mobCompactionStates</a></pre>
 </li>
 </ul>
 <a name="cpHost">
@@ -1914,7 +1914,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>cpHost</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/master/MasterCoprocessorHost.html" title="class in org.apache.hadoop.hbase.master">MasterCoprocessorHost</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.395">cpHost</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/master/MasterCoprocessorHost.html" title="class in org.apache.hadoop.hbase.master">MasterCoprocessorHost</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.396">cpHost</a></pre>
 </li>
 </ul>
 <a name="preLoadTableDescriptors">
@@ -1923,7 +1923,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>preLoadTableDescriptors</h4>
-<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.397">preLoadTableDescriptors</a></pre>
+<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.398">preLoadTableDescriptors</a></pre>
 </li>
 </ul>
 <a name="masterActiveTime">
@@ -1932,7 +1932,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>masterActiveTime</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.400">masterActiveTime</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.401">masterActiveTime</a></pre>
 </li>
 </ul>
 <a name="masterFinishedInitializationTime">
@@ -1941,7 +1941,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>masterFinishedInitializationTime</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.403">masterFinishedInitializationTime</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.404">masterFinishedInitializationTime</a></pre>
 </li>
 </ul>
 <a name="masterCheckCompression">
@@ -1950,7 +1950,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>masterCheckCompression</h4>
-<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.406">masterCheckCompression</a></pre>
+<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.407">masterCheckCompression</a></pre>
 </li>
 </ul>
 <a name="masterCheckEncryption">
@@ -1959,7 +1959,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>masterCheckEncryption</h4>
-<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.409">masterCheckEncryption</a></pre>
+<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.410">masterCheckEncryption</a></pre>
 </li>
 </ul>
 <a name="coprocessorServiceHandlers">
@@ -1968,7 +1968,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>coprocessorServiceHandlers</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>,com.google.protobuf.Service&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.411">coprocessorServiceHandlers</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>,com.google.protobuf.Service&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.412">coprocessorServiceHandlers</a></pre>
 </li>
 </ul>
 <a name="snapshotManager">
@@ -1977,7 +1977,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>snapshotManager</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html" title="class in org.apache.hadoop.hbase.master.snapshot">SnapshotManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.414">snapshotManager</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html" title="class in org.apache.hadoop.hbase.master.snapshot">SnapshotManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.415">snapshotManager</a></pre>
 </li>
 </ul>
 <a name="mpmHost">
@@ -1986,7 +1986,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>mpmHost</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.html" title="class in org.apache.hadoop.hbase.procedure">MasterProcedureManagerHost</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.416">mpmHost</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.html" title="class in org.apache.hadoop.hbase.procedure">MasterProcedureManagerHost</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.417">mpmHost</a></pre>
 </li>
 </ul>
 <a name="quotaManager">
@@ -1995,7 +1995,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>quotaManager</h4>
-<pre>private volatile&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/MasterQuotaManager.html" title="class in org.apache.hadoop.hbase.quotas">MasterQuotaManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.419">quotaManager</a></pre>
+<pre>private volatile&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/MasterQuotaManager.html" title="class in org.apache.hadoop.hbase.quotas">MasterQuotaManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.420">quotaManager</a></pre>
 </li>
 </ul>
 <a name="spaceQuotaSnapshotNotifier">
@@ -2004,7 +2004,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>spaceQuotaSnapshotNotifier</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifier.html" title="interface in org.apache.hadoop.hbase.quotas">SpaceQuotaSnapshotNotifier</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.420">spaceQuotaSnapshotNotifier</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifier.html" title="interface in org.apache.hadoop.hbase.quotas">SpaceQuotaSnapshotNotifier</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.421">spaceQuotaSnapshotNotifier</a></pre>
 </li>
 </ul>
 <a name="quotaObserverChore">
@@ -2013,7 +2013,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>quotaObserverChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaObserverChore.html" title="class in org.apache.hadoop.hbase.quotas">QuotaObserverChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.421">quotaObserverChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaObserverChore.html" title="class in org.apache.hadoop.hbase.quotas">QuotaObserverChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.422">quotaObserverChore</a></pre>
 </li>
 </ul>
 <a name="snapshotQuotaChore">
@@ -2022,7 +2022,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>snapshotQuotaChore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html" title="class in org.apache.hadoop.hbase.quotas">SnapshotQuotaObserverChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.422">snapshotQuotaChore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.html" title="class in org.apache.hadoop.hbase.quotas">SnapshotQuotaObserverChore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.423">snapshotQuotaChore</a></pre>
 </li>
 </ul>
 <a name="procedureExecutor">
@@ -2031,7 +2031,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>procedureExecutor</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html" title="class in org.apache.hadoop.hbase.procedure2">ProcedureExecutor</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html" title="class in org.apache.hadoop.hbase.master.procedure">MasterProcedureEnv</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.424">procedureExecutor</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html" title="class in org.apache.hadoop.hbase.procedure2">ProcedureExecutor</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html" title="class in org.apache.hadoop.hbase.master.procedure">MasterProcedureEnv</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.425">procedureExecutor</a></pre>
 </li>
 </ul>
 <a name="procedureStore">
@@ -2040,7 +2040,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>procedureStore</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html" title="class in org.apache.hadoop.hbase.procedure2.store.wal">WALProcedureStore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.425">procedureStore</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html" title="class in org.apache.hadoop.hbase.procedure2.store.wal">WALProcedureStore</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.426">procedureStore</a></pre>
 </li>
 </ul>
 <a name="tableStateManager">
@@ -2049,7 +2049,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>tableStateManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/TableStateManager.html" title="class in org.apache.hadoop.hbase.master">TableStateManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.428">tableStateManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/TableStateManager.html" title="class in org.apache.hadoop.hbase.master">TableStateManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.429">tableStateManager</a></pre>
 </li>
 </ul>
 <a name="splitPlanCount">
@@ -2058,7 +2058,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>splitPlanCount</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.430">splitPlanCount</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.431">splitPlanCount</a></pre>
 </li>
 </ul>
 <a name="mergePlanCount">
@@ -2067,7 +2067,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>mergePlanCount</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.431">mergePlanCount</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.432">mergePlanCount</a></pre>
 </li>
 </ul>
 <a name="favoredNodesManager">
@@ -2076,7 +2076,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>favoredNodesManager</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/favored/FavoredNodesManager.html" title="class in org.apache.hadoop.hbase.favored">FavoredNodesManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.434">favoredNodesManager</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/favored/FavoredNodesManager.html" title="class in org.apache.hadoop.hbase.favored">FavoredNodesManager</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.435">favoredNodesManager</a></pre>
 </li>
 </ul>
 <a name="masterJettyServer">
@@ -2085,7 +2085,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>masterJettyServer</h4>
-<pre>private&nbsp;org.eclipse.jetty.server.Server <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.437">masterJettyServer</a></pre>
+<pre>private&nbsp;org.eclipse.jetty.server.Server <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.438">masterJettyServer</a></pre>
 <div class="block">jetty server for master to redirect requests to regionserver infoServer</div>
 </li>
 </ul>
@@ -2095,7 +2095,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockListLast">
 <li class="blockList">
 <h4>UNSUPPORTED_PROCEDURES</h4>
-<pre>private static final&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a>&lt;?&gt;&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.806">UNSUPPORTED_PROCEDURES</a></pre>
+<pre>private static final&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a>&lt;?&gt;&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.807">UNSUPPORTED_PROCEDURES</a></pre>
 </li>
 </ul>
 </li>
@@ -2112,7 +2112,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HMaster</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.493">HMaster</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.494">HMaster</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)
         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>,
                org.apache.zookeeper.KeeperException</pre>
 <div class="block">Initializes the HMaster. The steps are as follows:
@@ -2146,7 +2146,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getUseThisHostnameInstead</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.564">getUseThisHostnameInstead</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.565">getUseThisHostnameInstead</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#getUseThisHostnameInstead-org.apache.hadoop.conf.Configuration-">getUseThisHostnameInstead</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2159,7 +2159,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>run</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.571">run</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.572">run</a>()</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#run--">HRegionServer</a></code></span></div>
 <div class="block">The HRegionServer sticks in this loop until closed.</div>
 <dl>
@@ -2176,7 +2176,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>putUpJettyServer</h4>
-<pre>private&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.609">putUpJettyServer</a>()
+<pre>private&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.610">putUpJettyServer</a>()
                       throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -2190,7 +2190,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getMetaTableObserver</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true" title="class or interface in java.util.function">Function</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptorBuilder.html" title="class in org.apache.hadoop.hbase.client">TableDescriptorBuilder</a>,<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptorBuilder.html" title="class in org.apache.hadoop.hbase.client">TableDescriptorBuilder</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.659">getMetaTableObserver</a>()</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true" title="class or interface in java.util.function">Function</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptorBuilder.html" title="class in org.apache.hadoop.hbase.client">TableDescriptorBuilder</a>,<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptorBuilder.html" title="class in org.apache.hadoop.hbase.client">TableDescriptorBuilder</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.660">getMetaTableObserver</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#getMetaTableObserver--">getMetaTableObserver</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2203,7 +2203,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>login</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.666">login</a>(<a href="../../../../../org/apache/hadoop/hbase/security/UserProvider.html" title="class in org.apache.hadoop.hbase.security">UserProvider</a>&nbsp;user,
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.667">login</a>(<a href="../../../../../org/apache/hadoop/hbase/security/UserProvider.html" title="class in org.apache.hadoop.hbase.security">UserProvider</a>&nbsp;user,
                      <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;host)
               throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">For compatibility, if failed with regionserver credentials, try the master one</div>
@@ -2221,7 +2221,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>waitForMasterActive</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.681">waitForMasterActive</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.682">waitForMasterActive</a>()</pre>
 <div class="block">If configured to put regions on active master,
  wait till a backup master becomes active.
  Otherwise, loop till the server is stopped or aborted.</div>
@@ -2237,7 +2237,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getMasterRpcServices</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.html" title="class in org.apache.hadoop.hbase.master">MasterRpcServices</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.689">getMasterRpcServices</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.html" title="class in org.apache.hadoop.hbase.master">MasterRpcServices</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.690">getMasterRpcServices</a>()</pre>
 </li>
 </ul>
 <a name="balanceSwitch-boolean-">
@@ -2246,7 +2246,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>balanceSwitch</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.693">balanceSwitch</a>(boolean&nbsp;b)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.694">balanceSwitch</a>(boolean&nbsp;b)
                       throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -2260,7 +2260,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getProcessName</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.698">getProcessName</a>()</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.699">getProcessName</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#getProcessName--">getProcessName</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2273,7 +2273,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>canCreateBaseZNode</h4>
-<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.703">canCreateBaseZNode</a>()</pre>
+<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.704">canCreateBaseZNode</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#canCreateBaseZNode--">canCreateBaseZNode</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2286,7 +2286,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>canUpdateTableDescriptor</h4>
-<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.708">canUpdateTableDescriptor</a>()</pre>
+<pre>protected&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.709">canUpdateTableDescriptor</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#canUpdateTableDescriptor--">canUpdateTableDescriptor</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2299,7 +2299,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>createRpcServices</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/RSRpcServices.html" title="class in org.apache.hadoop.hbase.regionserver">RSRpcServices</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.713">createRpcServices</a>()
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/regionserver/RSRpcServices.html" title="class in org.apache.hadoop.hbase.regionserver">RSRpcServices</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.714">createRpcServices</a>()
                                    throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
@@ -2315,7 +2315,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>configureInfoServer</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.718">configureInfoServer</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.719">configureInfoServer</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#configureInfoServer--">configureInfoServer</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2328,7 +2328,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getDumpServlet</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a>&lt;? extends javax.servlet.http.HttpServlet&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.727">getDumpServlet</a>()</pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a>&lt;? extends javax.servlet.http.HttpServlet&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.728">getDumpServlet</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#getDumpServlet--">getDumpServlet</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html" title="class in org.apache.hadoop.hbase.regionserver">HRegionServer</a></code></dd>
@@ -2341,7 +2341,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getMasterMetrics</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMaster.html" title="class in org.apache.hadoop.hbase.master">MetricsMaster</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.732">getMasterMetrics</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMaster.html" title="class in org.apache.hadoop.hbase.master">MetricsMaster</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.733">getMasterMetrics</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html#getMasterMetrics--">getMasterMetrics</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></code></dd>
@@ -2356,7 +2356,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>initializeZKBasedSystemTrackers</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.746">initializeZKBasedSystemTrackers</a>()
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.747">initializeZKBasedSystemTrackers</a>()
                                         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>,
                                                <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true" title="class or interface in java.lang">InterruptedException</a>,
                                                org.apache.zookeeper.KeeperException,
@@ -2383,7 +2383,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>checkUnsupportedProcedure</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.816">checkUnsupportedProcedure</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html" title="class in org.apache.hadoop.hbase.procedure2">Procedure</a>&gt;,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html" title="class in org.apache.hadoop.hbase.procedure2">Procedure</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html" title="class in org.apache.hadoop.hbase.master.procedure">MasterProcedureEnv</a>&gt;&gt;&gt;&nbsp;procsByType)
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.817">checkUnsupportedProcedure</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true" title="class or interface in java.lang">Class</a>&lt;? extends <a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html" title="class in org.apache.hadoop.hbase.procedure2">Procedure</a>&gt;,<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html" title="class in org.apache.hadoop.hbase.procedure2">Procedure</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html" title="class in org.apache.hadoop.hbase.master.procedure">MasterProcedureEnv</a>&gt;&gt;&gt;&nbsp;procsByType)
                                 throws <a href="../../../../../org/apache/hadoop/hbase/HBaseIOException.html" title="class in org.apache.hadoop.hbase">HBaseIOException</a></pre>
 <div class="block">In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is
  incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to
@@ -2401,7 +2401,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>finishActiveMasterInitialization</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.882">finishActiveMasterInitialization</a>(<a href="../../../../../org/apache/hadoop/hbase/monitoring/MonitoredTask.html" title="interface in org.apache.hadoop.hbase.monitoring">MonitoredTask</a>&nbsp;status)
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.883">finishActiveMasterInitialization</a>(<a href="../../../../../org/apache/hadoop/hbase/monitoring/MonitoredTask.html" title="interface in org.apache.hadoop.hbase.monitoring">MonitoredTask</a>&nbsp;status)
                                        throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>,
                                               <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true" title="class or interface in java.lang">InterruptedException</a>,
                                               org.apache.zookeeper.KeeperException,
@@ -2456,7 +2456,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>updateConfigurationForQuotasObserver</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1136">updateConfigurationForQuotasObserver</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1145">updateConfigurationForQuotasObserver</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
 <div class="block">Adds the <code>MasterQuotasObserver</code> to the list of configured Master observers to
  automatically remove quotas for a table when that table is deleted.</div>
 </li>
@@ -2467,7 +2467,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>initMobCleaner</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1153">initMobCleaner</a>()</pre>
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1162">initMobCleaner</a>()</pre>
 </li>
 </ul>
 <a name="createMetaBootstrap--">
@@ -2476,7 +2476,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>createMetaBootstrap</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterMetaBootstrap.html" title="class in org.apache.hadoop.hbase.master">MasterMetaBootstrap</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1178">createMetaBootstrap</a>()</pre>
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterMetaBootstrap.html" title="class in org.apache.hadoop.hbase.master">MasterMetaBootstrap</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1187">createMetaBootstrap</a>()</pre>
 <div class="block"><p>
  Create a <a href="../../../../../org/apache/hadoop/hbase/master/MasterMetaBootstrap.html" title="class in org.apache.hadoop.hbase.master"><code>MasterMetaBootstrap</code></a> instance.
  </p>
@@ -2491,7 +2491,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>createServerManager</h4>
-<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master">ServerManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1193">createServerManager</a>(<a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a>&nbsp;master)
+<pre>protected&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master">ServerManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1202">createServerManager</a>(<a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a>&nbsp;master)
                                      throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><p>
  Create a <a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master"><code>ServerManager</code></a> instance.
@@ -2511,7 +2511,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>waitForRegionServers</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1200">waitForRegionServers</a>(<a href="../../../../../org/apache/hadoop/hbase/monitoring/MonitoredTask.html" title="interface in org.apache.hadoop.hbase.monitoring">MonitoredTask</a>&nbsp;status)
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1209">waitForRegionServers</a>(<a href="../../../../../org/apache/hadoop/hbase/monitoring/MonitoredTask.html" title="interface in org.apache.hadoop.hbase.monitoring">MonitoredTask</a>&nbsp;status)
                            throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>,
                                   <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true" title="class or interface in java.lang">InterruptedException</a></pre>
 <dl>
@@ -2527,7 +2527,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>initClusterSchemaService</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1207">initClusterSchemaService</a>()
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1216">initClusterSchemaService</a>()
                                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>,
                                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true" title="class or interface in java.lang">InterruptedException</a></pre>
 <dl>
@@ -2543,7 +2543,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>initQuotaManager</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1219">initQuotaManager</a>()
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1228">initQuotaManager</a>()
                        throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -2557,7 +2557,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>createQuotaSnapshotNotifier</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifier.html" title="interface in org.apache.hadoop.hbase.quotas">SpaceQuotaSnapshotNotifier</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1225">createQuotaSnapshotNotifier</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifier.html" title="interface in org.apache.hadoop.hbase.quotas">SpaceQuotaSnapshotNotifier</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1234">createQuotaSnapshotNotifier</a>()</pre>
 </li>
 </ul>
 <a name="isCatalogJanitorEnabled--">
@@ -2566,7 +2566,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>isCatalogJanitorEnabled</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1231">isCatalogJanitorEnabled</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1240">isCatalogJanitorEnabled</a>()</pre>
 </li>
 </ul>
 <a name="isCleanerChoreEnabled--">
@@ -2575,7 +2575,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>isCleanerChoreEnabled</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1236">isCleanerChoreEnabled</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1245">isCleanerChoreEnabled</a>()</pre>
 </li>
 </ul>
 <a name="getTableDescriptors--">
@@ -2584,7 +2584,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getTableDescriptors</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableDescriptors.html" title="interface in org.apache.hadoop.hbase">TableDescriptors</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1251">getTableDescriptors</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableDescriptors.html" title="interface in org.apache.hadoop.hbase">TableDescriptors</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1260">getTableDescriptors</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html#getTableDescriptors--">getTableDescriptors</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></code></dd>
@@ -2601,7 +2601,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getServerManager</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master">ServerManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1256">getServerManager</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/ServerManager.html" title="class in org.apache.hadoop.hbase.master">ServerManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1265">getServerManager</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html#getServerManager--">getServerManager</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></code></dd>
@@ -2616,7 +2616,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getMasterFileSystem</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterFileSystem.html" title="class in org.apache.hadoop.hbase.master">MasterFileSystem</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1261">getMasterFileSystem</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterFileSystem.html" title="class in org.apache.hadoop.hbase.master">MasterFileSystem</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1270">getMasterFileSystem</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html#getMasterFileSystem--">getMasterFileSystem</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></code></dd>
@@ -2631,7 +2631,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getMasterWalManager</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterWalManager.html" title="class in org.apache.hadoop.hbase.master">MasterWalManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1266">getMasterWalManager</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/MasterWalManager.html" title="class in org.apache.hadoop.hbase.master">MasterWalManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1275">getMasterWalManager</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html#getMasterWalManager--">getMasterWalManager</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></code></dd>
@@ -2646,7 +2646,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>getTableStateManager</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/TableStateManager.html" title="class in org.apache.hadoop.hbase.master">TableStateManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1271">getTableStateManager</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/TableStateManager.html" title="class in org.apache.hadoop.hbase.master">TableStateManager</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1280">getTableStateManager</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html#getTableStateManager--">getTableStateManager</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/master/MasterServices.html" title="interface in org.apache.hadoop.hbase.master">MasterServices</a></code></dd>
@@ -2661,7 +2661,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>startServiceThreads</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1282">startServiceThreads</a>()
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1291">startServiceThreads</a>()
                           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -2675,7 +2675,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>stopServiceThreads</h4>
-<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1332">stopServiceThreads</a>()</pre>
+<pre>protected&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1341">stopServiceThreads</a>()</pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#stopServiceThreads--">HRegionServer</a></code></span></div>
 <div class="block">Wait on all threads to finish. Presumption is that all closes and stops
  have already been called.</div>
@@ -2691,7 +2691,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>createProcedureExecutor</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1380">createProcedureExecutor</a>()
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1389">createProcedureExecutor</a>()
                               throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -2705,7 +2705,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>startProcedureExecutor</h4>
-<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1403">startProcedureExecutor</a>()
+<pre>private&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.1412">startProcedureExecutor</a>()
                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -2719,7 +2719,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/master/MasterServices
 <ul class="blockList">
 <li class="blockList">
 <h4>stopProcedureEx

<TRUNCATED>
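
Note on the checkUnsupportedProcedure(Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt;) signature documented in the hunk above: the method receives the procedures recovered from the procedure store already grouped by concrete class. A minimal sketch of how such a map could be assembled is shown below; the ProcedureGrouping class and its groupByType method are illustrative names only and are not part of HMaster.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.Procedure;

/** Illustrative helper only: groups recovered procedures by their concrete class. */
final class ProcedureGrouping {
  static Map<Class<? extends Procedure>, List<Procedure<MasterProcedureEnv>>> groupByType(
      List<Procedure<MasterProcedureEnv>> recovered) {
    Map<Class<? extends Procedure>, List<Procedure<MasterProcedureEnv>>> procsByType =
        new HashMap<>();
    for (Procedure<MasterProcedureEnv> proc : recovered) {
      // Key on the runtime class, matching the Map shape in the documented signature.
      procsByType.computeIfAbsent(proc.getClass(), k -> new ArrayList<>()).add(proc);
    }
    return procsByType;
  }
}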

[26/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 7df71bd..a990153 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -201,3634 +201,3643 @@
 <span class="sourceLineNo">193</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.193"></a>
 <span class="sourceLineNo">194</span>import org.apache.hadoop.hbase.util.CompressionTest;<a name="line.194"></a>
 <span class="sourceLineNo">195</span>import org.apache.hadoop.hbase.util.EncryptionTest;<a name="line.195"></a>
-<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.196"></a>
-<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.197"></a>
-<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.199"></a>
-<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.200"></a>
-<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.201"></a>
-<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.203"></a>
-<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.204"></a>
-<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.206"></a>
-<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.210"></a>
-<span class="sourceLineNo">211</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.211"></a>
-<span class="sourceLineNo">212</span>import org.apache.zookeeper.KeeperException;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>import org.eclipse.jetty.server.Server;<a name="line.213"></a>
-<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.215"></a>
-<span class="sourceLineNo">216</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>import org.slf4j.Logger;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>import org.slf4j.LoggerFactory;<a name="line.218"></a>
-<span class="sourceLineNo">219</span><a name="line.219"></a>
-<span class="sourceLineNo">220</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.222"></a>
-<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.223"></a>
-<span class="sourceLineNo">224</span><a name="line.224"></a>
-<span class="sourceLineNo">225</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.226"></a>
-<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.227"></a>
-<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.230"></a>
-<span class="sourceLineNo">231</span><a name="line.231"></a>
-<span class="sourceLineNo">232</span>/**<a name="line.232"></a>
-<span class="sourceLineNo">233</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.233"></a>
-<span class="sourceLineNo">234</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.234"></a>
-<span class="sourceLineNo">235</span> * run the cluster.  All others park themselves in their constructor until<a name="line.235"></a>
-<span class="sourceLineNo">236</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.236"></a>
-<span class="sourceLineNo">237</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.237"></a>
-<span class="sourceLineNo">238</span> *<a name="line.238"></a>
-<span class="sourceLineNo">239</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.239"></a>
-<span class="sourceLineNo">240</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.240"></a>
-<span class="sourceLineNo">241</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.241"></a>
-<span class="sourceLineNo">242</span> *<a name="line.242"></a>
-<span class="sourceLineNo">243</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.243"></a>
-<span class="sourceLineNo">244</span> *<a name="line.244"></a>
-<span class="sourceLineNo">245</span> * @see org.apache.zookeeper.Watcher<a name="line.245"></a>
-<span class="sourceLineNo">246</span> */<a name="line.246"></a>
-<span class="sourceLineNo">247</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.247"></a>
-<span class="sourceLineNo">248</span>@SuppressWarnings("deprecation")<a name="line.248"></a>
-<span class="sourceLineNo">249</span>public class HMaster extends HRegionServer implements MasterServices {<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private static Logger LOG = LoggerFactory.getLogger(HMaster.class);<a name="line.250"></a>
-<span class="sourceLineNo">251</span><a name="line.251"></a>
-<span class="sourceLineNo">252</span>  /**<a name="line.252"></a>
-<span class="sourceLineNo">253</span>   * Protection against zombie master. Started once Master accepts active responsibility and<a name="line.253"></a>
-<span class="sourceLineNo">254</span>   * starts taking over responsibilities. Allows a finite time window before giving up ownership.<a name="line.254"></a>
-<span class="sourceLineNo">255</span>   */<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private static class InitializationMonitor extends HasThread {<a name="line.256"></a>
-<span class="sourceLineNo">257</span>    /** The amount of time in milliseconds to sleep before checking initialization status. */<a name="line.257"></a>
-<span class="sourceLineNo">258</span>    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";<a name="line.258"></a>
-<span class="sourceLineNo">259</span>    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);<a name="line.259"></a>
-<span class="sourceLineNo">260</span><a name="line.260"></a>
-<span class="sourceLineNo">261</span>    /**<a name="line.261"></a>
-<span class="sourceLineNo">262</span>     * When timeout expired and initialization has not complete, call {@link System#exit(int)} when<a name="line.262"></a>
-<span class="sourceLineNo">263</span>     * true, do nothing otherwise.<a name="line.263"></a>
-<span class="sourceLineNo">264</span>     */<a name="line.264"></a>
-<span class="sourceLineNo">265</span>    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";<a name="line.265"></a>
-<span class="sourceLineNo">266</span>    public static final boolean HALT_DEFAULT = false;<a name="line.266"></a>
-<span class="sourceLineNo">267</span><a name="line.267"></a>
-<span class="sourceLineNo">268</span>    private final HMaster master;<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    private final long timeout;<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    private final boolean haltOnTimeout;<a name="line.270"></a>
-<span class="sourceLineNo">271</span><a name="line.271"></a>
-<span class="sourceLineNo">272</span>    /** Creates a Thread that monitors the {@link #isInitialized()} state. */<a name="line.272"></a>
-<span class="sourceLineNo">273</span>    InitializationMonitor(HMaster master) {<a name="line.273"></a>
-<span class="sourceLineNo">274</span>      super("MasterInitializationMonitor");<a name="line.274"></a>
-<span class="sourceLineNo">275</span>      this.master = master;<a name="line.275"></a>
-<span class="sourceLineNo">276</span>      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);<a name="line.276"></a>
-<span class="sourceLineNo">277</span>      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);<a name="line.277"></a>
-<span class="sourceLineNo">278</span>      this.setDaemon(true);<a name="line.278"></a>
-<span class="sourceLineNo">279</span>    }<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>    @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>    public void run() {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>      try {<a name="line.283"></a>
-<span class="sourceLineNo">284</span>        while (!master.isStopped() &amp;&amp; master.isActiveMaster()) {<a name="line.284"></a>
-<span class="sourceLineNo">285</span>          Thread.sleep(timeout);<a name="line.285"></a>
-<span class="sourceLineNo">286</span>          if (master.isInitialized()) {<a name="line.286"></a>
-<span class="sourceLineNo">287</span>            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");<a name="line.287"></a>
-<span class="sourceLineNo">288</span>          } else {<a name="line.288"></a>
-<span class="sourceLineNo">289</span>            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"<a name="line.289"></a>
-<span class="sourceLineNo">290</span>                + " consider submitting a bug report including a thread dump of this process.");<a name="line.290"></a>
-<span class="sourceLineNo">291</span>            if (haltOnTimeout) {<a name="line.291"></a>
-<span class="sourceLineNo">292</span>              LOG.error("Zombie Master exiting. Thread dump to stdout");<a name="line.292"></a>
-<span class="sourceLineNo">293</span>              Threads.printThreadInfo(System.out, "Zombie HMaster");<a name="line.293"></a>
-<span class="sourceLineNo">294</span>              System.exit(-1);<a name="line.294"></a>
-<span class="sourceLineNo">295</span>            }<a name="line.295"></a>
-<span class="sourceLineNo">296</span>          }<a name="line.296"></a>
-<span class="sourceLineNo">297</span>        }<a name="line.297"></a>
-<span class="sourceLineNo">298</span>      } catch (InterruptedException ie) {<a name="line.298"></a>
-<span class="sourceLineNo">299</span>        LOG.trace("InitMonitor thread interrupted. Existing.");<a name="line.299"></a>
-<span class="sourceLineNo">300</span>      }<a name="line.300"></a>
-<span class="sourceLineNo">301</span>    }<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  }<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  // MASTER is name of the webapp and the attribute name used stuffing this<a name="line.304"></a>
-<span class="sourceLineNo">305</span>  //instance into web context.<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  public static final String MASTER = "master";<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  // Manager and zk listener for master election<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private final ActiveMasterManager activeMasterManager;<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Region server tracker<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private RegionServerTracker regionServerTracker;<a name="line.311"></a>
-<span class="sourceLineNo">312</span>  // Draining region server tracker<a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private DrainingServerTracker drainingServerTracker;<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  // Tracker for load balancer state<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  LoadBalancerTracker loadBalancerTracker;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  // Tracker for meta location, if any client ZK quorum specified<a name="line.316"></a>
-<span class="sourceLineNo">317</span>  MetaLocationSyncer metaLocationSyncer;<a name="line.317"></a>
-<span class="sourceLineNo">318</span>  // Tracker for active master location, if any client ZK quorum specified<a name="line.318"></a>
-<span class="sourceLineNo">319</span>  MasterAddressSyncer masterAddressSyncer;<a name="line.319"></a>
-<span class="sourceLineNo">320</span><a name="line.320"></a>
-<span class="sourceLineNo">321</span>  // Tracker for split and merge state<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private SplitOrMergeTracker splitOrMergeTracker;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  // Tracker for region normalizer state<a name="line.324"></a>
-<span class="sourceLineNo">325</span>  private RegionNormalizerTracker regionNormalizerTracker;<a name="line.325"></a>
-<span class="sourceLineNo">326</span><a name="line.326"></a>
-<span class="sourceLineNo">327</span>  //Tracker for master maintenance mode setting<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private MasterMaintenanceModeTracker maintenanceModeTracker;<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private ClusterSchemaService clusterSchemaService;<a name="line.330"></a>
-<span class="sourceLineNo">331</span><a name="line.331"></a>
-<span class="sourceLineNo">332</span>  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    "hbase.master.wait.on.service.seconds";<a name="line.333"></a>
-<span class="sourceLineNo">334</span>  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;<a name="line.334"></a>
-<span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  // Metrics for the HMaster<a name="line.336"></a>
-<span class="sourceLineNo">337</span>  final MetricsMaster metricsMaster;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  // file system manager for the master FS operations<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private MasterFileSystem fileSystemManager;<a name="line.339"></a>
-<span class="sourceLineNo">340</span>  private MasterWalManager walManager;<a name="line.340"></a>
-<span class="sourceLineNo">341</span><a name="line.341"></a>
-<span class="sourceLineNo">342</span>  // server manager to deal with region server info<a name="line.342"></a>
-<span class="sourceLineNo">343</span>  private volatile ServerManager serverManager;<a name="line.343"></a>
-<span class="sourceLineNo">344</span><a name="line.344"></a>
-<span class="sourceLineNo">345</span>  // manager of assignment nodes in zookeeper<a name="line.345"></a>
-<span class="sourceLineNo">346</span>  private AssignmentManager assignmentManager;<a name="line.346"></a>
-<span class="sourceLineNo">347</span><a name="line.347"></a>
-<span class="sourceLineNo">348</span>  // manager of replication<a name="line.348"></a>
-<span class="sourceLineNo">349</span>  private ReplicationPeerManager replicationPeerManager;<a name="line.349"></a>
-<span class="sourceLineNo">350</span><a name="line.350"></a>
-<span class="sourceLineNo">351</span>  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;<a name="line.351"></a>
-<span class="sourceLineNo">352</span><a name="line.352"></a>
-<span class="sourceLineNo">353</span>  // buffer for "fatal error" notices from region servers<a name="line.353"></a>
-<span class="sourceLineNo">354</span>  // in the cluster. This is only used for assisting<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  // operations/debugging.<a name="line.355"></a>
-<span class="sourceLineNo">356</span>  MemoryBoundedLogMessageBuffer rsFatals;<a name="line.356"></a>
-<span class="sourceLineNo">357</span><a name="line.357"></a>
-<span class="sourceLineNo">358</span>  // flag set after we become the active master (used for testing)<a name="line.358"></a>
-<span class="sourceLineNo">359</span>  private volatile boolean activeMaster = false;<a name="line.359"></a>
-<span class="sourceLineNo">360</span><a name="line.360"></a>
-<span class="sourceLineNo">361</span>  // flag set after we complete initialization once active<a name="line.361"></a>
-<span class="sourceLineNo">362</span>  private final ProcedureEvent&lt;?&gt; initialized = new ProcedureEvent&lt;&gt;("master initialized");<a name="line.362"></a>
-<span class="sourceLineNo">363</span><a name="line.363"></a>
-<span class="sourceLineNo">364</span>  // flag set after master services are started,<a name="line.364"></a>
-<span class="sourceLineNo">365</span>  // initialization may have not completed yet.<a name="line.365"></a>
-<span class="sourceLineNo">366</span>  volatile boolean serviceStarted = false;<a name="line.366"></a>
-<span class="sourceLineNo">367</span><a name="line.367"></a>
-<span class="sourceLineNo">368</span>  // Maximum time we should run balancer for<a name="line.368"></a>
-<span class="sourceLineNo">369</span>  private final int maxBlancingTime;<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  // Maximum percent of regions in transition when balancing<a name="line.370"></a>
-<span class="sourceLineNo">371</span>  private final double maxRitPercent;<a name="line.371"></a>
-<span class="sourceLineNo">372</span><a name="line.372"></a>
-<span class="sourceLineNo">373</span>  private final LockManager lockManager = new LockManager(this);<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>  private LoadBalancer balancer;<a name="line.375"></a>
-<span class="sourceLineNo">376</span>  private RegionNormalizer normalizer;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>  private BalancerChore balancerChore;<a name="line.377"></a>
-<span class="sourceLineNo">378</span>  private RegionNormalizerChore normalizerChore;<a name="line.378"></a>
-<span class="sourceLineNo">379</span>  private ClusterStatusChore clusterStatusChore;<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  private ClusterStatusPublisher clusterStatusPublisherChore = null;<a name="line.380"></a>
-<span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  CatalogJanitor catalogJanitorChore;<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  private LogCleaner logCleaner;<a name="line.383"></a>
-<span class="sourceLineNo">384</span>  private HFileCleaner hfileCleaner;<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  private ReplicationBarrierCleaner replicationBarrierCleaner;<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  private MobCompactionChore mobCompactChore;<a name="line.387"></a>
-<span class="sourceLineNo">388</span>  private MasterMobCompactionThread mobCompactThread;<a name="line.388"></a>
-<span class="sourceLineNo">389</span>  // used to synchronize the mobCompactionStates<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  private final IdLock mobCompactionLock = new IdLock();<a name="line.390"></a>
-<span class="sourceLineNo">391</span>  // save the information of mob compactions in tables.<a name="line.391"></a>
-<span class="sourceLineNo">392</span>  // the key is table name, the value is the number of compactions in that table.<a name="line.392"></a>
-<span class="sourceLineNo">393</span>  private Map&lt;TableName, AtomicInteger&gt; mobCompactionStates = Maps.newConcurrentMap();<a name="line.393"></a>
-<span class="sourceLineNo">394</span><a name="line.394"></a>
-<span class="sourceLineNo">395</span>  MasterCoprocessorHost cpHost;<a name="line.395"></a>
-<span class="sourceLineNo">396</span><a name="line.396"></a>
-<span class="sourceLineNo">397</span>  private final boolean preLoadTableDescriptors;<a name="line.397"></a>
-<span class="sourceLineNo">398</span><a name="line.398"></a>
-<span class="sourceLineNo">399</span>  // Time stamps for when a hmaster became active<a name="line.399"></a>
-<span class="sourceLineNo">400</span>  private long masterActiveTime;<a name="line.400"></a>
-<span class="sourceLineNo">401</span><a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // Time stamp for when HMaster finishes becoming Active Master<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  private long masterFinishedInitializationTime;<a name="line.403"></a>
-<span class="sourceLineNo">404</span><a name="line.404"></a>
-<span class="sourceLineNo">405</span>  //should we check the compression codec type at master side, default true, HBASE-6370<a name="line.405"></a>
-<span class="sourceLineNo">406</span>  private final boolean masterCheckCompression;<a name="line.406"></a>
-<span class="sourceLineNo">407</span><a name="line.407"></a>
-<span class="sourceLineNo">408</span>  //should we check encryption settings at master side, default true<a name="line.408"></a>
-<span class="sourceLineNo">409</span>  private final boolean masterCheckEncryption;<a name="line.409"></a>
-<span class="sourceLineNo">410</span><a name="line.410"></a>
-<span class="sourceLineNo">411</span>  Map&lt;String, Service&gt; coprocessorServiceHandlers = Maps.newHashMap();<a name="line.411"></a>
-<span class="sourceLineNo">412</span><a name="line.412"></a>
-<span class="sourceLineNo">413</span>  // monitor for snapshot of hbase tables<a name="line.413"></a>
-<span class="sourceLineNo">414</span>  SnapshotManager snapshotManager;<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  // monitor for distributed procedures<a name="line.415"></a>
-<span class="sourceLineNo">416</span>  private MasterProcedureManagerHost mpmHost;<a name="line.416"></a>
-<span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  // it is assigned after 'initialized' guard set to true, so should be volatile<a name="line.418"></a>
-<span class="sourceLineNo">419</span>  private volatile MasterQuotaManager quotaManager;<a name="line.419"></a>
-<span class="sourceLineNo">420</span>  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  private QuotaObserverChore quotaObserverChore;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  private SnapshotQuotaObserverChore snapshotQuotaChore;<a name="line.422"></a>
-<span class="sourceLineNo">423</span><a name="line.423"></a>
-<span class="sourceLineNo">424</span>  private ProcedureExecutor&lt;MasterProcedureEnv&gt; procedureExecutor;<a name="line.424"></a>
-<span class="sourceLineNo">425</span>  private WALProcedureStore procedureStore;<a name="line.425"></a>
-<span class="sourceLineNo">426</span><a name="line.426"></a>
-<span class="sourceLineNo">427</span>  // handle table states<a name="line.427"></a>
-<span class="sourceLineNo">428</span>  private TableStateManager tableStateManager;<a name="line.428"></a>
-<span class="sourceLineNo">429</span><a name="line.429"></a>
-<span class="sourceLineNo">430</span>  private long splitPlanCount;<a name="line.430"></a>
-<span class="sourceLineNo">431</span>  private long mergePlanCount;<a name="line.431"></a>
-<span class="sourceLineNo">432</span><a name="line.432"></a>
-<span class="sourceLineNo">433</span>  /* Handle favored nodes information */<a name="line.433"></a>
-<span class="sourceLineNo">434</span>  private FavoredNodesManager favoredNodesManager;<a name="line.434"></a>
-<span class="sourceLineNo">435</span><a name="line.435"></a>
-<span class="sourceLineNo">436</span>  /** jetty server for master to redirect requests to regionserver infoServer */<a name="line.436"></a>
-<span class="sourceLineNo">437</span>  private Server masterJettyServer;<a name="line.437"></a>
-<span class="sourceLineNo">438</span><a name="line.438"></a>
-<span class="sourceLineNo">439</span>  public static class RedirectServlet extends HttpServlet {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    private static final long serialVersionUID = 2894774810058302473L;<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    private final int regionServerInfoPort;<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    private final String regionServerHostname;<a name="line.442"></a>
-<span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>    /**<a name="line.444"></a>
-<span class="sourceLineNo">445</span>     * @param infoServer that we're trying to send all requests to<a name="line.445"></a>
-<span class="sourceLineNo">446</span>     * @param hostname may be null. if given, will be used for redirects instead of host from client.<a name="line.446"></a>
-<span class="sourceLineNo">447</span>     */<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    public RedirectServlet(InfoServer infoServer, String hostname) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>       regionServerInfoPort = infoServer.getPort();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>       regionServerHostname = hostname;<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    }<a name="line.451"></a>
-<span class="sourceLineNo">452</span><a name="line.452"></a>
-<span class="sourceLineNo">453</span>    @Override<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    public void doGet(HttpServletRequest request,<a name="line.454"></a>
-<span class="sourceLineNo">455</span>        HttpServletResponse response) throws ServletException, IOException {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      String redirectHost = regionServerHostname;<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      if(redirectHost == null) {<a name="line.457"></a>
-<span class="sourceLineNo">458</span>        redirectHost = request.getServerName();<a name="line.458"></a>
-<span class="sourceLineNo">459</span>        if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {<a name="line.459"></a>
-<span class="sourceLineNo">460</span>          LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +<a name="line.460"></a>
-<span class="sourceLineNo">461</span>              MASTER_HOSTNAME_KEY + "' is not set; client will get a HTTP 400 response. If " +<a name="line.461"></a>
-<span class="sourceLineNo">462</span>              "your HBase deployment relies on client accessible names that the region server process " +<a name="line.462"></a>
-<span class="sourceLineNo">463</span>              "can't resolve locally, then you should set the previously mentioned configuration variable " +<a name="line.463"></a>
-<span class="sourceLineNo">464</span>              "to an appropriate hostname.");<a name="line.464"></a>
-<span class="sourceLineNo">465</span>          // no sending client provided input back to the client, so the goal host is just in the logs.<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +<a name="line.466"></a>
-<span class="sourceLineNo">467</span>              "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          return;<a name="line.469"></a>
-<span class="sourceLineNo">470</span>        }<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      }<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      // host and port we're using, but it's buried way too deep to do that ATM.<a name="line.473"></a>
-<span class="sourceLineNo">474</span>      String redirectUrl = request.getScheme() + "://"<a name="line.474"></a>
-<span class="sourceLineNo">475</span>        + redirectHost + ":" + regionServerInfoPort<a name="line.475"></a>
-<span class="sourceLineNo">476</span>        + request.getRequestURI();<a name="line.476"></a>
-<span class="sourceLineNo">477</span>      response.sendRedirect(redirectUrl);<a name="line.477"></a>
-<span class="sourceLineNo">478</span>    }<a name="line.478"></a>
-<span class="sourceLineNo">479</span>  }<a name="line.479"></a>
-<span class="sourceLineNo">480</span><a name="line.480"></a>
-<span class="sourceLineNo">481</span>  /**<a name="line.481"></a>
-<span class="sourceLineNo">482</span>   * Initializes the HMaster. The steps are as follows:<a name="line.482"></a>
-<span class="sourceLineNo">483</span>   * &lt;p&gt;<a name="line.483"></a>
-<span class="sourceLineNo">484</span>   * &lt;ol&gt;<a name="line.484"></a>
-<span class="sourceLineNo">485</span>   * &lt;li&gt;Initialize the local HRegionServer<a name="line.485"></a>
-<span class="sourceLineNo">486</span>   * &lt;li&gt;Start the ActiveMasterManager.<a name="line.486"></a>
-<span class="sourceLineNo">487</span>   * &lt;/ol&gt;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>   * &lt;p&gt;<a name="line.488"></a>
-<span class="sourceLineNo">489</span>   * Remaining steps of initialization occur in<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * #finishActiveMasterInitialization(MonitoredTask) after<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * the master becomes the active one.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public HMaster(final Configuration conf)<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      throws IOException, KeeperException {<a name="line.494"></a>
-<span class="sourceLineNo">495</span>    super(conf);<a name="line.495"></a>
-<span class="sourceLineNo">496</span>    TraceUtil.initTracer(conf);<a name="line.496"></a>
-<span class="sourceLineNo">497</span>    try {<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      this.rsFatals = new MemoryBoundedLogMessageBuffer(<a name="line.498"></a>
-<span class="sourceLineNo">499</span>          conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      LOG.info("hbase.rootdir=" + getRootDir() +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>          ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));<a name="line.501"></a>
-<span class="sourceLineNo">502</span><a name="line.502"></a>
-<span class="sourceLineNo">503</span>      // Disable usage of meta replicas in the master<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      decorateMasterConfiguration(this.conf);<a name="line.506"></a>
-<span class="sourceLineNo">507</span><a name="line.507"></a>
-<span class="sourceLineNo">508</span>      // Hack! Maps DFSClient =&gt; Master for logs.  HDFS made this<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      // config param for task trackers, but we can piggyback off of it.<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      if (this.conf.get("mapreduce.task.attempt.id") == null) {<a name="line.510"></a>
-<span class="sourceLineNo">511</span>        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      }<a name="line.512"></a>
-<span class="sourceLineNo">513</span><a name="line.513"></a>
-<span class="sourceLineNo">514</span>      // should we check the compression codec type at master side, default true, HBASE-6370<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);<a name="line.515"></a>
-<span class="sourceLineNo">516</span><a name="line.516"></a>
-<span class="sourceLineNo">517</span>      // should we check encryption settings at master side, default true<a name="line.517"></a>
-<span class="sourceLineNo">518</span>      this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);<a name="line.518"></a>
-<span class="sourceLineNo">519</span><a name="line.519"></a>
-<span class="sourceLineNo">520</span>      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));<a name="line.520"></a>
-<span class="sourceLineNo">521</span><a name="line.521"></a>
-<span class="sourceLineNo">522</span>      // preload table descriptor at startup<a name="line.522"></a>
-<span class="sourceLineNo">523</span>      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>      this.maxBlancingTime = getMaxBalancingTime();<a name="line.525"></a>
-<span class="sourceLineNo">526</span>      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,<a name="line.526"></a>
-<span class="sourceLineNo">527</span>          HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);<a name="line.527"></a>
-<span class="sourceLineNo">528</span><a name="line.528"></a>
-<span class="sourceLineNo">529</span>      // Do we publish the status?<a name="line.529"></a>
-<span class="sourceLineNo">530</span><a name="line.530"></a>
-<span class="sourceLineNo">531</span>      boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,<a name="line.531"></a>
-<span class="sourceLineNo">532</span>          HConstants.STATUS_PUBLISHED_DEFAULT);<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      Class&lt;? extends ClusterStatusPublisher.Publisher&gt; publisherClass =<a name="line.533"></a>
-<span class="sourceLineNo">534</span>          conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,<a name="line.534"></a>
-<span class="sourceLineNo">535</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,<a name="line.535"></a>
-<span class="sourceLineNo">536</span>              ClusterStatusPublisher.Publisher.class);<a name="line.536"></a>
-<span class="sourceLineNo">537</span><a name="line.537"></a>
-<span class="sourceLineNo">538</span>      if (shouldPublish) {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        if (publisherClass == null) {<a name="line.539"></a>
-<span class="sourceLineNo">540</span>          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +<a name="line.540"></a>
-<span class="sourceLineNo">541</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +<a name="line.541"></a>
-<span class="sourceLineNo">542</span>              " is not set - not publishing status");<a name="line.542"></a>
-<span class="sourceLineNo">543</span>        } else {<a name="line.543"></a>
-<span class="sourceLineNo">544</span>          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>          getChoreService().scheduleChore(clusterStatusPublisherChore);<a name="line.545"></a>
-<span class="sourceLineNo">546</span>        }<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      }<a name="line.547"></a>
-<span class="sourceLineNo">548</span><a name="line.548"></a>
-<span class="sourceLineNo">549</span>      // Some unit tests don't need a cluster, so no zookeeper at all<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      } else {<a name="line.552"></a>
-<span class="sourceLineNo">553</span>        this.activeMasterManager = null;<a name="line.553"></a>
-<span class="sourceLineNo">554</span>      }<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    } catch (Throwable t) {<a name="line.555"></a>
-<span class="sourceLineNo">556</span>      // Make sure we log the exception. HMaster is often started via reflection and the<a name="line.556"></a>
-<span class="sourceLineNo">557</span>      // cause of failed startup is lost.<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      LOG.error("Failed construction of Master", t);<a name="line.558"></a>
-<span class="sourceLineNo">559</span>      throw t;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>  }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>  @Override<a name="line.563"></a>
-<span class="sourceLineNo">564</span>  protected String getUseThisHostnameInstead(Configuration conf) {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    return conf.get(MASTER_HOSTNAME_KEY);<a name="line.565"></a>
-<span class="sourceLineNo">566</span>  }<a name="line.566"></a>
-<span class="sourceLineNo">567</span><a name="line.567"></a>
-<span class="sourceLineNo">568</span>  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  // block in here until then.<a name="line.569"></a>
-<span class="sourceLineNo">570</span>  @Override<a name="line.570"></a>
-<span class="sourceLineNo">571</span>  public void run() {<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    try {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>        Threads.setDaemonThreadRunning(new Thread(() -&gt; {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>          try {<a name="line.575"></a>
-<span class="sourceLineNo">576</span>            int infoPort = putUpJettyServer();<a name="line.576"></a>
-<span class="sourceLineNo">577</span>            startActiveMasterManager(infoPort);<a name="line.577"></a>
-<span class="sourceLineNo">578</span>          } catch (Throwable t) {<a name="line.578"></a>
-<span class="sourceLineNo">579</span>            // Make sure we log the exception.<a name="line.579"></a>
-<span class="sourceLineNo">580</span>            String error = "Failed to become Active Master";<a name="line.580"></a>
-<span class="sourceLineNo">581</span>            LOG.error(error, t);<a name="line.581"></a>
-<span class="sourceLineNo">582</span>            // Abort should have been called already.<a name="line.582"></a>
-<span class="sourceLineNo">583</span>            if (!isAborted()) {<a name="line.583"></a>
-<span class="sourceLineNo">584</span>              abort(error, t);<a name="line.584"></a>
-<span class="sourceLineNo">585</span>            }<a name="line.585"></a>
-<span class="sourceLineNo">586</span>          }<a name="line.586"></a>
-<span class="sourceLineNo">587</span>        }));<a name="line.587"></a>
-<span class="sourceLineNo">588</span>      }<a name="line.588"></a>
-<span class="sourceLineNo">589</span>      // Fall in here even if we have been aborted. Need to run the shutdown services and<a name="line.589"></a>
-<span class="sourceLineNo">590</span>      // the super run call will do this for us.<a name="line.590"></a>
-<span class="sourceLineNo">591</span>      super.run();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    } finally {<a name="line.592"></a>
-<span class="sourceLineNo">593</span>      if (this.clusterSchemaService != null) {<a name="line.593"></a>
-<span class="sourceLineNo">594</span>        // If on way out, then we are no longer active master.<a name="line.594"></a>
-<span class="sourceLineNo">595</span>        this.clusterSchemaService.stopAsync();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>        try {<a name="line.596"></a>
-<span class="sourceLineNo">597</span>          this.clusterSchemaService.awaitTerminated(<a name="line.597"></a>
-<span class="sourceLineNo">598</span>              getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,<a name="line.598"></a>
-<span class="sourceLineNo">599</span>              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);<a name="line.599"></a>
-<span class="sourceLineNo">600</span>        } catch (TimeoutException te) {<a name="line.600"></a>
-<span class="sourceLineNo">601</span>          LOG.warn("Failed shutdown of clusterSchemaService", te);<a name="line.601"></a>
-<span class="sourceLineNo">602</span>        }<a name="line.602"></a>
-<span class="sourceLineNo">603</span>      }<a name="line.603"></a>
-<span class="sourceLineNo">604</span>      this.activeMaster = false;<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    }<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  }<a name="line.606"></a>
-<span class="sourceLineNo">607</span><a name="line.607"></a>
-<span class="sourceLineNo">608</span>  // return the actual infoPort, -1 means disable info server.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>  private int putUpJettyServer() throws IOException {<a name="line.609"></a>
-<span class="sourceLineNo">610</span>    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      return -1;<a name="line.611"></a>
-<span class="sourceLineNo">612</span>    }<a name="line.612"></a>
-<span class="sourceLineNo">613</span>    final int infoPort = conf.getInt("hbase.master.info.port.orig",<a name="line.613"></a>
-<span class="sourceLineNo">614</span>      HConstants.DEFAULT_MASTER_INFOPORT);<a name="line.614"></a>
-<span class="sourceLineNo">615</span>    // -1 is for disabling info server, so no redirecting<a name="line.615"></a>
-<span class="sourceLineNo">616</span>    if (infoPort &lt; 0 || infoServer == null) {<a name="line.616"></a>
-<span class="sourceLineNo">617</span>      return -1;<a name="line.617"></a>
-<span class="sourceLineNo">618</span>    }<a name="line.618"></a>
-<span class="sourceLineNo">619</span>    if(infoPort == infoServer.getPort()) {<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      return infoPort;<a name="line.620"></a>
-<span class="sourceLineNo">621</span>    }<a name="line.621"></a>
-<span class="sourceLineNo">622</span>    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");<a name="line.622"></a>
-<span class="sourceLineNo">623</span>    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {<a name="line.623"></a>
-<span class="sourceLineNo">624</span>      String msg =<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          "Failed to start redirecting jetty server. Address " + addr<a name="line.625"></a>
-<span class="sourceLineNo">626</span>              + " does not belong to this host. Correct configuration parameter: "<a name="line.626"></a>
-<span class="sourceLineNo">627</span>              + "hbase.master.info.bindAddress";<a name="line.627"></a>
-<span class="sourceLineNo">628</span>      LOG.error(msg);<a name="line.628"></a>
-<span class="sourceLineNo">629</span>      throw new IOException(msg);<a name="line.629"></a>
-<span class="sourceLineNo">630</span>    }<a name="line.630"></a>
-<span class="sourceLineNo">631</span><a name="line.631"></a>
-<span class="sourceLineNo">632</span>    // TODO I'm pretty sure we could just add another binding to the InfoServer run by<a name="line.632"></a>
-<span class="sourceLineNo">633</span>    // the RegionServer and have it run the RedirectServlet instead of standing up<a name="line.633"></a>
-<span class="sourceLineNo">634</span>    // a second entire stack here.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>    masterJettyServer = new Server();<a name="line.635"></a>
-<span class="sourceLineNo">636</span>    final ServerConnector connector = new ServerConnector(masterJettyServer);<a name="line.636"></a>
-<span class="sourceLineNo">637</span>    connector.setHost(addr);<a name="line.637"></a>
-<span class="sourceLineNo">638</span>    connector.setPort(infoPort);<a name="line.638"></a>
-<span class="sourceLineNo">639</span>    masterJettyServer.addConnector(connector);<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    masterJettyServer.setStopAtShutdown(true);<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    final String redirectHostname =<a name="line.642"></a>
-<span class="sourceLineNo">643</span>        StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;<a name="line.643"></a>
-<span class="sourceLineNo">644</span><a name="line.644"></a>
-<span class="sourceLineNo">645</span>    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    context.addServlet(new ServletHolder(redirect), "/*");<a name="line.647"></a>
-<span class="sourceLineNo">648</span>    context.setServer(masterJettyServer);<a name="line.648"></a>
-<span class="sourceLineNo">649</span><a name="line.649"></a>
-<span class="sourceLineNo">650</span>    try {<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      masterJettyServer.start();<a name="line.651"></a>
-<span class="sourceLineNo">652</span>    } catch (Exception e) {<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      throw new IOException("Failed to start redirecting jetty server", e);<a name="line.653"></a>
-<span class="sourceLineNo">654</span>    }<a name="line.654"></a>
-<span class="sourceLineNo">655</span>    return connector.getLocalPort();<a name="line.655"></a>
-<span class="sourceLineNo">656</span>  }<a name="line.656"></a>
-<span class="sourceLineNo">657</span><a name="line.657"></a>
-<span class="sourceLineNo">658</span>  @Override<a name="line.658"></a>
-<span class="sourceLineNo">659</span>  protected Function&lt;TableDescriptorBuilder, TableDescriptorBuilder&gt; getMetaTableObserver() {<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    return builder -&gt; builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));<a name="line.660"></a>
-<span class="sourceLineNo">661</span>  }<a name="line.661"></a>
-<span class="sourceLineNo">662</span>  /**<a name="line.662"></a>
-<span class="sourceLineNo">663</span>   * For compatibility, if failed with regionserver credentials, try the master one<a name="line.663"></a>
-<span class="sourceLineNo">664</span>   */<a name="line.664"></a>
-<span class="sourceLineNo">665</span>  @Override<a name="line.665"></a>
-<span class="sourceLineNo">666</span>  protected void login(UserProvider user, String host) throws IOException {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>    try {<a name="line.667"></a>
-<span class="sourceLineNo">668</span>      super.login(user, host);<a name="line.668"></a>
-<span class="sourceLineNo">669</span>    } catch (IOException ie) {<a name="line.669"></a>
-<span class="sourceLineNo">670</span>      user.login("hbase.master.keytab.file",<a name="line.670"></a>
-<span class="sourceLineNo">671</span>        "hbase.master.kerberos.principal", host);<a name="line.671"></a>
-<span class="sourceLineNo">672</span>    }<a name="line.672"></a>
-<span class="sourceLineNo">673</span>  }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>  /**<a name="line.675"></a>
-<span class="sourceLineNo">676</span>   * If configured to put regions on active master,<a name="line.676"></a>
-<span class="sourceLineNo">677</span>   * wait till a backup master becomes active.<a name="line.677"></a>
-<span class="sourceLineNo">678</span>   * Otherwise, loop till the server is stopped or aborted.<a name="line.678"></a>
-<span class="sourceLineNo">679</span>   */<a name="line.679"></a>
-<span class="sourceLineNo">680</span>  @Override<a name="line.680"></a>
-<span class="sourceLineNo">681</span>  protected void waitForMasterActive(){<a name="line.681"></a>
-<span class="sourceLineNo">682</span>    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);<a name="line.682"></a>
-<span class="sourceLineNo">683</span>    while (!(tablesOnMaster &amp;&amp; activeMaster) &amp;&amp; !isStopped() &amp;&amp; !isAborted()) {<a name="line.683"></a>
-<span class="sourceLineNo">684</span>      sleeper.sleep();<a name="line.684"></a>
-<span class="sourceLineNo">685</span>    }<a name="line.685"></a>
-<span class="sourceLineNo">686</span>  }<a name="line.686"></a>
-<span class="sourceLineNo">687</span><a name="line.687"></a>
-<span class="sourceLineNo">688</span>  @VisibleForTesting<a name="line.688"></a>
-<span class="sourceLineNo">689</span>  public MasterRpcServices getMasterRpcServices() {<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    return (MasterRpcServices)rpcServices;<a name="line.690"></a>
-<span class="sourceLineNo">691</span>  }<a name="line.691"></a>
-<span class="sourceLineNo">692</span><a name="line.692"></a>
-<span class="sourceLineNo">693</span>  public boolean balanceSwitch(final boolean b) throws IOException {<a name="line.693"></a>
-<span class="sourceLineNo">694</span>    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);<a name="line.694"></a>
-<span class="sourceLineNo">695</span>  }<a name="line.695"></a>
-<span class="sourceLineNo">696</span><a name="line.696"></a>
-<span class="sourceLineNo">697</span>  @Override<a name="line.697"></a>
-<span class="sourceLineNo">698</span>  protected String getProcessName() {<a name="line.698"></a>
-<span class="sourceLineNo">699</span>    return MASTER;<a name="line.699"></a>
-<span class="sourceLineNo">700</span>  }<a name="line.700"></a>
-<span class="sourceLineNo">701</span><a name="line.701"></a>
-<span class="sourceLineNo">702</span>  @Override<a name="line.702"></a>
-<span class="sourceLineNo">703</span>  protected boolean canCreateBaseZNode() {<a name="line.703"></a>
-<span class="sourceLineNo">704</span>    return true;<a name="line.704"></a>
-<span class="sourceLineNo">705</span>  }<a name="line.705"></a>
-<span class="sourceLineNo">706</span><a name="line.706"></a>
-<span class="sourceLineNo">707</span>  @Override<a name="line.707"></a>
-<span class="sourceLineNo">708</span>  protected boolean canUpdateTableDescriptor() {<a name="line.708"></a>
-<span class="sourceLineNo">709</span>    return true;<a name="line.709"></a>
-<span class="sourceLineNo">710</span>  }<a name="line.710"></a>
-<span class="sourceLineNo">711</span><a name="line.711"></a>
-<span class="sourceLineNo">712</span>  @Override<a name="line.712"></a>
-<span class="sourceLineNo">713</span>  protected RSRpcServices createRpcServices() throws IOException {<a name="line.713"></a>
-<span class="sourceLineNo">714</span>    return new MasterRpcServices(this);<a name="line.714"></a>
-<span class="sourceLineNo">715</span>  }<a name="line.715"></a>
-<span class="sourceLineNo">716</span><a name="line.716"></a>
-<span class="sourceLineNo">717</span>  @Override<a name="line.717"></a>
-<span class="sourceLineNo">718</span>  protected void configureInfoServer() {<a name="line.718"></a>
-<span class="sourceLineNo">719</span>    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);<a name="line.719"></a>
-<span class="sourceLineNo">720</span>    infoServer.setAttribute(MASTER, this);<a name="line.720"></a>
-<span class="sourceLineNo">721</span>    if (LoadBalancer.isTablesOnMaster(conf)) {<a name="line.721"></a>
-<span class="sourceLineNo">722</span>      super.configureInfoServer();<a name="line.722"></a>
-<span class="sourceLineNo">723</span>    }<a name="line.723"></a>
-<span class="sourceLineNo">724</span>  }<a name="line.724"></a>
-<span class="sourceLineNo">725</span><a name="line.725"></a>
-<span class="sourceLineNo">726</span>  @Override<a name="line.726"></a>
-<span class="sourceLineNo">727</span>  protected Class&lt;? extends HttpServlet&gt; getDumpServlet() {<a name="line.727"></a>
-<span class="sourceLineNo">728</span>    return MasterDumpServlet.class;<a name="line.728"></a>
-<span class="sourceLineNo">729</span>  }<a name="line.729"></a>
-<span class="sourceLineNo">730</span><a name="line.730"></a>
-<span class="sourceLineNo">731</span>  @Override<a name="line.731"></a>
-<span class="sourceLineNo">732</span>  public MetricsMaster getMasterMetrics() {<a name="line.732"></a>
-<span class="sourceLineNo">733</span>    return metricsMaster;<a name="line.733"></a>
-<span class="sourceLineNo">734</span>  }<a name="line.734"></a>
-<span class="sourceLineNo">735</span><a name="line.735"></a>
-<span class="sourceLineNo">736</span>  /**<a name="line.736"></a>
-<span class="sourceLineNo">737</span>   * &lt;p&gt;<a name="line.737"></a>
-<span class="sourceLineNo">738</span>   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it<a name="line.738"></a>
-<span class="sourceLineNo">739</span>   * should have already been initialized along with {@link ServerManager}.<a name="line.739"></a>
-<span class="sourceLineNo">740</span>   * &lt;/p&gt;<a name="line.740"></a>
-<span class="sourceLineNo">741</span>   * &lt;p&gt;<a name="line.741"></a>
-<span class="sourceLineNo">742</span>   * Will be overridden in tests.<a name="line.742"></a>
-<span class="sourceLineNo">743</span>   * &lt;/p&gt;<a name="line.743"></a>
-<span class="sourceLineNo">744</span>   */<a name="line.744"></a>
-<span class="sourceLineNo">745</span>  @VisibleForTesting<a name="line.745"></a>
-<span class="sourceLineNo">746</span>  protected void initializeZKBasedSystemTrackers()<a name="line.746"></a>
-<span class="sourceLineNo">747</span>      throws IOException, InterruptedException, KeeperException, ReplicationException {<a name="line.747"></a>
-<span class="sourceLineNo">748</span>    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);<a name="line.748"></a>
-<span class="sourceLineNo">749</span>    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);<a name="line.749"></a>
-<span class="sourceLineNo">750</span>    this.normalizer.setMasterServices(this);<a name="line.750"></a>
-<span class="sourceLineNo">751</span>    this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);<a name="line.751"></a>
-<span class="sourceLineNo">752</span>    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);<a name="line.752"></a>
-<span class="sourceLineNo">753</span>    this.loadBalancerTracker.start();<a name="line.753"></a>
-<span class="sourceLineNo">754</span><a name="line.754"></a>
-<span class="sourceLineNo">755</span>    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    this.regionNormalizerTracker.start();<a name="line.756"></a>
-<span class="sourceLineNo">757</span><a name="line.757"></a>
-<span class="sourceLineNo">758</span>    this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);<a name="line.758"></a>
-<span class="sourceLineNo">759</span>    this.splitOrMergeTracker.start();<a name="line.759"></a>
-<span class="sourceLineNo">760</span><a name="line.760"></a>
-<span class="sourceLineNo">761</span>    this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);<a name="line.761"></a>
-<span class="sourceLineNo">762</span>    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);<a name="line.762"></a>
-<span class="sourceLineNo">763</span><a name="line.763"></a>
-<span class="sourceLineNo">764</span>    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);<a name="line.764"></a>
-<span class="sourceLineNo">765</span>    this.drainingServerTracker.start();<a name="line.765"></a>
-<span class="sourceLineNo">766</span><a name="line.766"></a>
-<span class="sourceLineNo">767</span>    this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);<a name="line.767"></a>
-<span class="sourceLineNo">768</span>    this.maintenanceModeTracker.start();<a name="line.768"></a>
-<span class="sourceLineNo">769</span><a name="line.769"></a>
-<span class="sourceLineNo">770</span>    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);<a name="line.770"></a>
-<span class="sourceLineNo">771</span>    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,<a name="line.771"></a>
-<span class="sourceLineNo">772</span>      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);<a name="line.772"></a>
-<span class="sourceLineNo">773</span>    if (clientQuorumServers != null &amp;&amp; !clientZkObserverMode) {<a name="line.773"></a>
-<span class="sourceLineNo">774</span>      // we need to take care of the ZK information synchronization<a name="line.774"></a>
-<span class="sourceLineNo">775</span>      // if given client ZK are not observer nodes<a name="line.775"></a>
-<span class="sourceLineNo">776</span>      ZKWatcher clientZkWatcher = new ZKWatcher(conf,<a name="line.776"></a>
-<span class="sourceLineNo">777</span>          getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,<a name="line.777"></a>
-<span class="sourceLineNo">778</span>          false, true);<a name="line.778"></a>
-<span class="sourceLineNo">779</span>      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);<a name="line.779"></a>
-<span class="sourceLineNo">780</span>      this.metaLocationSyncer.start();<a name="line.780"></a>
-<span class="sourceLineNo">781</span>      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);<a name="line.781"></a>
-<span class="sourceLineNo">782</span>      this.masterAddressSyncer.start();<a name="line.782"></a>
-<span class="sourceLineNo">783</span>      // set cluster id is a one-go effort<a name="line.783"></a>
-<span class="sourceLineNo">784</span>      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());<a name="line.784"></a>
-<span class="sourceLineNo">785</span>    }<a name="line.785"></a>
-<span class="sourceLineNo">786</span><a name="line.786"></a>
-<span class="sourceLineNo">787</span>    // Set the cluster as up.  If new RSs, they'll be waiting on this before<a name="line.787"></a>
-<span class="sourceLineNo">788</span>    // going ahead with their startup.<a name="line.788"></a>
-<span class="sourceLineNo">789</span>    boolean wasUp = this.clusterStatusTracker.isClusterUp();<a name="line.789"></a>
-<span class="sourceLineNo">790</span>    if (!wasUp) this.clusterStatusTracker.setClusterUp();<a name="line.790"></a>
-<span class="sourceLineNo">791</span><a name="line.791"></a>
-<span class="sourceLineNo">792</span>    LOG.info("Active/primary master=" + this.serverName +<a name="line.792"></a>
-<span class="sourceLineNo">793</span>        ", sessionid=0x" +<a name="line.793"></a>
-<span class="sourceLineNo">794</span>        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +<a name="line.794"></a>
-<span class="sourceLineNo">795</span>        ", setting cluster-up flag (Was=" + wasUp + ")");<a name="line.795"></a>
-<span class="sourceLineNo">796</span><a name="line.796"></a>
-<span class="sourceLineNo">797</span>    // create/initialize the snapshot manager and other procedure managers<a name="line.797"></a>
-<span class="sourceLineNo">798</span>    this.snapshotManager = new SnapshotManager();<a name="line.798"></a>
-<span class="sourceLineNo">799</span>    this.mpmHost = new MasterProcedureManagerHost();<a name="line.799"></a>
-<span class="sourceLineNo">800</span>    this.mpmHost.register(this.snapshotManager);<a name="line.800"></a>
-<span class="sourceLineNo">801</span>    this.mpmHost.register(new MasterFlushTableProcedureManager());<a name="line.801"></a>
-<span class="sourceLineNo">802</span>    this.mpmHost.loadProcedures(conf);<a name="line.802"></a>
-<span class="sourceLineNo">803</span>    this.mpmHost.initialize(this, this.metricsMaster);<a name="line.803"></a>
-<span class="sourceLineNo">804</span>  }<a name="line.804"></a>
-<span class="sourceLineNo">805</span><a name="line.805"></a>
-<span class="sourceLineNo">806</span>  private static final ImmutableSet&lt;Class&lt;?&gt;&gt; UNSUPPORTED_PROCEDURES =<a name="line.806"></a>
-<span class="sourceLineNo">807</span>    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,<a name="line.807"></a>
-<span class="sourceLineNo">808</span>      MoveRegionProcedure.class);<a name="line.808"></a>
-<span class="sourceLineNo">809</span><a name="line.809"></a>
-<span class="sourceLineNo">810</span>  /**<a name="line.810"></a>
-<span class="sourceLineNo">811</span>   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is<a name="line.811"></a>
-<span class="sourceLineNo">812</span>   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to<a name="line.812"></a>
-<span class="sourceLineNo">813</span>   * make sure that there are none these procedures when upgrading. If there are, the master will<a name="line.813"></a>
-<span class="sourceLineNo">814</span>   * quit, you need to go back to the old version to finish these procedures first before upgrading.<a name="line.814"></a>
-<span class="sourceLineNo">815</span>   */<a name="line.815"></a>
-<span class="sourceLineNo">816</span>  private void checkUnsupportedProcedure(<a name="line.816"></a>
-<span class="sourceLineNo">817</span>      Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType)<a name="line.817"></a>
-<span class="sourceLineNo">818</span>      throws HBaseIOException {<a name="line.818"></a>
-<span class="sourceLineNo">819</span>    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to<a name="line.819"></a>
-<span class="sourceLineNo">820</span>    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as<a name="line.820"></a>
-<span class="sourceLineNo">821</span>    // there will be conflict in the code for AM. We should finish all these procedures before<a name="line.821"></a>
-<span class="sourceLineNo">822</span>    // upgrading.<a name="line.822"></a>
-<span class="sourceLineNo">823</span>    for (Class&lt;?&gt; clazz : UNSUPPORTED_PROCEDURES) {<a name="line.823"></a>
-<span class="sourceLineNo">824</span>      List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt; procs = procsByType.get(clazz);<a name="line.824"></a>
-<span class="sourceLineNo">825</span>      if (procs != null) {<a name="line.825"></a>
-<span class="sourceLineNo">826</span>        LOG.error(<a name="line.826"></a>
-<span class="sourceLineNo">827</span>          "Unsupported procedure type {} found, please rollback your master to the old" +<a name="line.827"></a>
-<span class="sourceLineNo">828</span>            " version to finish them, and then try to upgrade again. The full procedure list: {}",<a name="line.828"></a>
-<span class="sourceLineNo">829</span>          clazz, procs);<a name="line.829"></a>
-<span class="sourceLineNo">830</span>        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");<a name="line.830"></a>
-<span class="sourceLineNo">831</span>      }<a name="line.831"></a>
-<span class="sourceLineNo">832</span>    }<a name="line.832"></a>
-<span class="sourceLineNo">833</span>    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to<a name="line.833"></a>
-<span class="sourceLineNo">834</span>    // make sure that no one will try to schedule it but SCP does have a state which will schedule<a name="line.834"></a>
-<span class="sourceLineNo">835</span>    // it.<a name="line.835"></a>
-<span class="sourceLineNo">836</span>    if (procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.836"></a>
-<span class="sourceLineNo">837</span>      .map(p -&gt; (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)) {<a name="line.837"></a>
-<span class="sourceLineNo">838</span>      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure," +<a name="line.838"></a>
-<span class="sourceLineNo">839</span>        " which is not supported any more. Please rollback your master to the old version to" +<a name="line.839"></a>
-<span class="sourceLineNo">840</span>        " finish them, and then try to upgrade again.");<a name="line.840"></a>
-<span class="sourceLineNo">841</span>      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");<a name="line.841"></a>
-<span class="sourceLineNo">842</span>    }<a name="line.842"></a>
-<span class="sourceLineNo">843</span>  }<a name="line.843"></a>
-<span class="sourceLineNo">844</span><a name="line.844"></a>
-<span class="sourceLineNo">845</span>  /**<a name="line.845"></a>
-<span class="sourceLineNo">846</span>   * Finish initialization of HMaster after becoming the primary master.<a name="line.846"></a>
-<span class="sourceLineNo">847</span>   * &lt;p/&gt;<a name="line.847"></a>
-<span class="sourceLineNo">848</span>   * The startup order is a bit complicated but very important, do not change it unless you know<a name="line.848"></a>
-<span class="sourceLineNo">849</span>   * what you are doing.<a name="line.849"></a>
-<span class="sourceLineNo">850</span>   * &lt;ol&gt;<a name="line.850"></a>
-<span class="sourceLineNo">851</span>   * &lt;li&gt;Initialize file system based components - file system manager, wal manager, table<a name="line.851"></a>
-<span class="sourceLineNo">852</span>   * descriptors, etc&lt;/li&gt;<a name="line.852"></a>
-<span class="sourceLineNo">853</span>   * &lt;li&gt;Publish cluster id&lt;/li&gt;<a name="line.853"></a>
-<span class="sourceLineNo">854</span>   * &lt;li&gt;Here comes the most complicated part - initialize server manager, assignment manager and<a name="line.854"></a>
-<span class="sourceLineNo">855</span>   * region server tracker<a name="line.855"></a>
-<span class="sourceLineNo">856</span>   * &lt;ol type='i'&gt;<a name="line.856"></a>
-<span class="sourceLineNo">857</span>   * &lt;li&gt;Create server manager&lt;/li&gt;<a name="line.857"></a>
-<span class="sourceLineNo">858</span>   * &lt;li&gt;Create procedure executor, load the procedures, but do not start workers. We will start it<a name="line.858"></a>
-<span class="sourceLineNo">859</span>   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same<a name="line.859"></a>
-<span class="sourceLineNo">860</span>   * server&lt;/li&gt;<a name="line.860"></a>
-<span class="sourceLineNo">861</span>   * &lt;li&gt;Create assignment manager and start it, load the meta region state, but do not load data<a name="line.861"></a>
-<span class="sourceLineNo">862</span>   * from meta region&lt;/li&gt;<a name="line.862"></a>
-<span class="sourceLineNo">863</span>   * &lt;li&gt;Start region server tracker, construct the online servers set and find out dead servers and<a name="line.863"></a>
-<span class="sourceLineNo">864</span>   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also<a name="line.864"></a>
-<span class="sourceLineNo">865</span>   * scan the wal directory to find out possible live region servers, and the differences between<a name="line.865"></a>
-<span class="sourceLineNo">866</span>   * these two sets are the dead servers&lt;/li&gt;<a name="line.866"></a>
-<span class="sourceLineNo">867</span>   * &lt;/ol&gt;<a name="line.867"></a>
-<span class="sourceLineNo">868</span>   * &lt;/li&gt;<a name="line.868"></a>
-<span class="sourceLineNo">869</span>   * &lt;li&gt;If this is a new deploy, schedule a InitMetaProcedure to initialize meta&lt;/li&gt;<a name="line.869"></a>
-<span class="sourceLineNo">870</span>   * &lt;li&gt;Start necessary service threads - balancer, catalog janior, executor services, and also the<a name="line.870"></a>
-<span class="sourceLineNo">871</span>   * procedure executor, etc. Notice that the balancer must be created first as assignment manager<a name="line.871"></a>
-<span class="sourceLineNo">872</span>   * may use it when assigning regions.&lt;/li&gt;<a name="line.872"></a>
-<span class="sourceLineNo">873</span>   * &lt;li&gt;Wait for meta to be initialized if necesssary, start table state manager.&lt;/li&gt;<a name="line.873"></a>
-<span class="sourceLineNo">874</span>   * &lt;li&gt;Wait for enough region servers to check-in&lt;/li&gt;<a name="line.874"></a>
-<span class="sourceLineNo">875</span>   * &lt;li&gt;Let assignment manager load data from meta and construct region states&lt;/li&gt;<a name="line.875"></a>
-<span class="sourceLineNo">876</span>   * &lt;li&gt;Start all other things such as chore services, etc&lt;/li&gt;<a name="line.876"></a>
-<span class="sourceLineNo">877</span>   * &lt;/ol&gt;<a name="line.877"></a>
-<span class="sourceLineNo">878</span>   * &lt;p/&gt;<a name="line.878"></a>
-<span class="sourceLineNo">879</span>   * Notice that now we will not schedule a special procedure to make meta online(unless the first<a name="line.879"></a>
-<span class="sourceLineNo">880</span>   * time where meta has not been created yet), we will rely on SCP to bring meta online.<a name="line.880"></a>
-<span class="sourceLineNo">881</span>   */<a name="line.881"></a>
-<span class="sourceLineNo">882</span>  private void finishActiveMasterInitialization(MonitoredTask status) throws IOException,<a name="line.882"></a>
-<span class="sourceLineNo">883</span>          InterruptedException, KeeperException, ReplicationException {<a name="line.883"></a>
-<span class="sourceLineNo">884</span>    Thread zombieDetector = new Thread(new InitializationMonitor(this),<a name="line.884"></a>
-<span class="sourceLineNo">885</span>        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());<a name="line.885"></a>
-<span class="sourceLineNo">886</span>    zombieDetector.setDaemon(true);<a name="line.886"></a>
-<span class="sourceLineNo">887</span>    zombieDetector.start();<a name="line.887"></a>
-<span class="sourceLineNo">888</span><a name="line.888"></a>
-<span class="sourceLineNo">889</span>    /*<a name="line.889"></a>
-<span class="sourceLineNo">890</span>     * We are active master now... go initialize components we need to run.<a name="line.890"></a>
-<span class="sourceLineNo">891</span>     */<a name="line.891"></a>
-<span class="sourceLineNo">892</span>    status.setStatus("Initializing Master file system");<a name="line.892"></a>
-<span class="sourceLineNo">893</span><a name="line.893"></a>
-<span class="sourceLineNo">894</span>    this.masterActiveTime = System.currentTimeMillis();<a name="line.894"></a>
-<span class="sourceLineNo">895</span>    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.<a name="line.895"></a>
-<span class="sourceLineNo">896</span>    // Initialize the chunkCreator<a name="line.896"></a>
-<span class="sourceLineNo">897</span>    initializeMemStoreChunkCreator();<a name="line.897"></a>
-<span class="sourceLineNo">898</span>    this.fileSystemManager = new MasterFileSystem(conf);<a name="line.898"></a>
-<span class="sourceLineNo">899</span>    this.walManager = new MasterWalManager(this);<a name="line.899"></a>
-<span class="sourceLineNo">900</span><a name="line.900"></a>
-<span class="sourceLineNo">901</span>    // enable table descriptors cache<a name="line.901"></a>
-<span class="sourceLineNo">902</span>    this.tableDescriptors.setCacheOn();<a name="line.902"></a>
-<span class="sourceLineNo">903</span><a name="line.903"></a>
-<span class="sourceLineNo">904</span>    // warm-up HTDs cache on master initialization<a name="line.904"></a>
-<span class="sourceLineNo">905</span>    if (preLoadTableDescriptors) {<a name="line.905"></a>
-<span class="sourceLineNo">906</span>      status.setStatus("Pre-loading table descriptors");<a name="line.906"></a>
-<span class="sourceLineNo">907</span>      this.tableDescriptors.getAll();<a name="line.907"></a>
-<span class="sourceLineNo">908</span>    }<a name="line.908"></a>
-<span class="sourceLineNo">909</span><a name="line.909"></a>
-<span class="sourceLineNo">910</span>    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but<a name="line.910"></a>
-<span class="sourceLineNo">911</span>    // only after it has checked in with the Master. At least a few tests ask Master for clusterId<a name="line.911"></a>
-<span class="sourceLineNo">912</span>    // before it has called its run method and before RegionServer has done the reportForDuty.<a name="line.912"></a>
-<span class="sourceLineNo">913</span>    ClusterId clusterId = fileSystemManager.getClusterId();<a name="line.913"></a>
-<span class="sourceLineNo">914</span>    status.setStatus("Publishing Cluster ID " + clusterId + " in ZooKeeper");<a name="line.914"></a>
-<span class="sourceLineNo">915</span>    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());<a name="line.915"></a>
-<span class="sourceLineNo">916</span>    this.clusterId = clusterId.toString();<a name="line.916"></a>
-<span class="sourceLineNo">917</span><a name="line.917"></a>
-<span class="sourceLineNo">918</span>    status.setStatus("Initialze ServerManager and schedule SCP for crash servers");<a name="line.918"></a>
-<span class="sourceLineNo">919</span>    this.serverManager = createServerManager(this);<a name="line.919"></a>
-<span class="sourceLineNo">920</span>    createProcedureExecutor();<a name="line.920"></a>
-<span class="sourceLineNo">921</span>    @SuppressWarnings("rawtypes")<a name="line.921"></a>
-<span class="sourceLineNo">922</span>    Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType =<a name="line.922"></a>
-<span class="sourceLineNo">923</span>      procedureExecutor.getActiveProceduresNoCopy().stream()<a name="line.923"></a>
-<span class="sourceLineNo">924</span>        .collect(Collectors.groupingBy(p -&gt; p.getClass()));<a name="line.924"></a>
-<span class="sourceLineNo">925</span><a name="line.925"></a>
-<span class="sourceLineNo">926</span>    checkUnsupportedProcedure(procsByType);<a name="line.926"></a>
-<span class="sourceLineNo">927</span><a name="line.927"></a>
-<span class="sourceLineNo">928</span>    // Create Assignment Manager<a name="line.928"></a>
-<span class="sourceLineNo">929</span>    this.assignmentManager = new AssignmentManager(this);<a name="line.929"></a>
-<span class="sourceLineNo">930</span>    this.assignmentManager.start();<a name="line.930"></a>
-<span class="sourceLineNo">931</span>    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as<a name="line.931"></a>
-<span class="sourceLineNo">932</span>    // completed, it could still be in the procedure list. This is a bit strange but is another<a name="line.932"></a>
-<span class="sourceLineNo">933</span>    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.<a name="line.933"></a>
-<span class="sourceLineNo">934</span>    List&lt;TransitRegionStateProcedure&gt; ritList =<a name="line.934"></a>
-<span class="sourceLineNo">935</span>      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()<a name="line.935"></a>
-<span class="sourceLineNo">936</span>        .filter(p -&gt; !p.isFinished()).map(p -&gt; (TransitRegionStateProcedure) p)<a name="line.936"></a>
-<span class="sourceLineNo">937</span>        .collect(Collectors.toList());<a name="line.937"></a>
-<span class="sourceLineNo">938</span>    this.assignmentManager.setupRIT(ritList);<a name="line.938"></a>
-<span class="sourceLineNo">939</span><a name="line.939"></a>
-<span class="sourceLineNo">940</span>    this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);<a name="line.940"></a>
-<span class="sourceLineNo">941</span>    this.regionServerTracker.start(<a name="line.941"></a>
-<span class="sourceLineNo">942</span>      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.942"></a>
-<span class="sourceLineNo">943</span>        .map(p -&gt; (ServerCrashProcedure) p).map(p -&gt; p.getServerName()).collect(Collectors.toSet()),<a name="line.943"></a>
-<span class="sourceLineNo">944</span>      walManager.getLiveServersFromWALDir());<a name="line.944"></a>
-<span class="sourceLineNo">945</span>    // This manager will be started AFTER hbase:meta is confirmed on line.<a name="line.945"></a>
-<span class="sourceLineNo">946</span>    // hbase.mirror.table.state.to.zookeeper is so hbase1 clients can connect. They read table<a name="line.946"></a>
-<span class="sourceLineNo">947</span>    // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients.<a name="line.947"></a>
-<span class="sourceLineNo">948</span>    this.tableStateManager =<a name="line.948"></a>
-<span class="sourceLineNo">949</span>      this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true)<a name="line.949"></a>
-<span class="sourceLineNo">950</span>        ?<a name="line.950"></a>
-<span class="sourceLineNo">951</span>        new MirroringTableStateManager(this):<a name="line.951"></a>
-<span class="sourceLineNo">952</span>        new TableStateManager(this);<a name="line.952"></a>
-<span class="sourceLineNo">953</span><a name="line.953"></a>
-<span class="sourceLineNo">954</span>    status.setStatus("Initializing ZK system trackers");<a name="line.954"></a>
-<span class="sourceLineNo">955</span>    initializeZKBasedSystemTrackers();<a name="line.955"></a>
-<span class="sourceLineNo">956</span>    status.setStatus("Loading last flushed sequence id of regions");<a name="line.956"></a>
-<span class="sourceLineNo">957</span>    try {<a name="line.957"></a>
-<span class="sourceLineNo">958</span>      this.serverManager.loadLastFlushedSequenceIds();<a name="line.958"></a>
-<span class="sourceLineNo">959</span>    } catch (IOException e) {<a name="line.959"></a>
-<span class="sourceLineNo">960</span>      LOG.debug("Failed to load last flushed sequence id of regions"<a name="line.960"></a>
-<span class="sourceLineNo">961</span>          + " from file system", e);<a name="line.961"></a>
-<span class="sourceLineNo">962</span>    }<a name="line.962"></a>
-<span class="sourceLineNo">963</span>    // Set ourselves as active Master now our claim has succeeded up in zk.<a name="line.963"></a>
-<span class="sourceLineNo">964</span>    this.activeMaster = true;<a name="line.964"></a>
-<span class="sourceLineNo">965</span><a name="line.965"></a>
-<span class="sourceLineNo">966</span>    // This is for backwards compatibility<a name="line.966"></a>
-<span class="sourceLineNo">967</span>    // See HBASE-11393<a name="line.967"></a>
-<span class="sourceLineNo">968</span>    status.setStatus("Update TableCFs node in ZNode");<a name="line.968"></a>
-<span class="sourceLineNo">969</span>    ReplicationPeerConfigUpgrader tableCFsUpdater =<a name="line.969"></a>
-<span class="sourceLineNo">970</span>        new ReplicationPeerConfigUpgrader(zooKeeper, conf);<a name="line.970"></a>
-<span class="sourceLineNo">971</span>    tableCFsUpdater.copyTableCFs();<a name="line.971"></a>
-<span class="sourceLineNo">972</span><a name="line.972"></a>
-<span class="sourceLineNo">973</span>    // Add the Observer to delete quotas on table deletion before starting all CPs by<a name="line.973"></a>
-<span class="sourceLineNo">974</span>    // default with quota support, avoiding if user specifically asks to not load this Observer.<a name="line.974"></a>
-<span class="sourceLineNo">975</span>    if (QuotaUtil.isQuotaEnabled(conf)) {<a name="line.975"></a>
-<span class="sourceLineNo">976</span>      updateConfigurationForQuotasObserver(conf);<a name="line.976"></a>
-<span class="sourceLineNo">977</span>    }<a name="line.977"></a>
-<span class="sourceLineNo">978</span>    // initialize master side coprocessors before we start handling requests<a name="line.978"></a>
-<span class="sourceLineNo">979</span>    status.setStatus("Initializing master coprocessors");<a name="line.979"></a>
-<span class="sourceLineNo">980</span>    this.cpHost = new MasterCoprocessorHost(this, this.conf);<a name="line.980"></a>
+<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HBaseFsck;<a name="line.196"></a>
+<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.197"></a>
+<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.198"></a>
+<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.199"></a>
+<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.200"></a>
+<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.201"></a>
+<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.203"></a>
+<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.204"></a>
+<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.207"></a>
+<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.211"></a>
+<span class="sourceLineNo">212</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.212"></a>
+<span class="sourceLineNo">213</span>import org.apache.zookeeper.KeeperException;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.Server;<a name="line.214"></a>
+<span class="sourceLineNo">215</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.215"></a>
+<span class="sourceLineNo">216</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.216"></a>
+<span class="sourceLineNo">217</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>import org.slf4j.Logger;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>import org.slf4j.LoggerFactory;<a name="line.219"></a>
+<span class="sourceLineNo">220</span><a name="line.220"></a>
+<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.224"></a>
+<span class="sourceLineNo">225</span><a name="line.225"></a>
+<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.226"></a>
+<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.227"></a>
+<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.228"></a>
+<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>/**<a name="line.233"></a>
+<span class="sourceLineNo">234</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.234"></a>
+<span class="sourceLineNo">235</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.235"></a>
+<span class="sourceLineNo">236</span> * run the cluster.  All others park themselves in their constructor until<a name="line.236"></a>
+<span class="sourceLineNo">237</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.237"></a>
+<span class="sourceLineNo">238</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.238"></a>
+<span class="sourceLineNo">239</span> *<a name="line.239"></a>
+<span class="sourceLineNo">240</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.240"></a>
+<span class="sourceLineNo">241</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.241"></a>
+<span class="sourceLineNo">242</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.242"></a>
+<span class="sourceLineNo">243</span> *<a name="line.243"></a>
+<span class="sourceLineNo">244</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.244"></a>
+<span class="so

<TRUNCATED>

[06/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state, i.e. to effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for an hbck tool for hbase-2.x.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy is now created with a custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException, which implies a timeout on these operations of up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"><

<TRUNCATED>
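
The checkAndMarkRunningHbck method in the hunk above runs lock-file creation on a helper thread and bounds the wait with hbase.hbck.lockfile.maxwaittime. Below is a minimal, self-contained sketch of that bounded-wait pattern using only the JDK; the names LockWithTimeoutSketch, acquireLockWithTimeout and tryCreateLockFile are hypothetical illustrations, not HBase APIs.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class LockWithTimeoutSketch {

  // Stand-in for the real work of creating a lock file; here it just returns a path.
  static Callable<String> tryCreateLockFile() {
    return () -> "/tmp/hbase-hbck.lock";
  }

  // Run the lock-file creation on a helper thread and give up after timeoutSeconds:
  // cancel on timeout, re-interrupt on interruption, always shut the executor down.
  static String acquireLockWithTimeout(long timeoutSeconds) {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    FutureTask<String> task = new FutureTask<>(tryCreateLockFile());
    executor.execute(task);
    String lockPath = null;
    try {
      lockPath = task.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (ExecutionException ee) {
      System.err.println("Lock creation failed: " + ee.getCause());
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    } catch (TimeoutException te) {
      task.cancel(true); // took too long to obtain the lock
    } finally {
      executor.shutdownNow();
    }
    return lockPath; // null means the lock was not obtained in time
  }

  public static void main(String[] args) {
    System.out.println("lock path: " + acquireLockWithTimeout(5));
  }
}

A null result here plays the same role as a null stream in the hunk above, where connect() treats it as a sign that another hbck instance may already hold the lock.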

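Both createFileWithRetries and unlockHbck in the same hunk loop on a RetryCounter built from the hbase.hbck.lockfile.* settings (max attempts, sleep interval, max sleep time). A compact JDK-only sketch of that bounded retry shape follows; RetryLoopSketch and deleteLockFile are hypothetical stand-ins rather than HBase code, and the constants merely mirror the shape of those settings.

import java.io.IOException;

public class RetryLoopSketch {

  // Constants mirroring the shape of the hbase.hbck.lockfile.* settings read in the diff.
  static final int MAX_ATTEMPTS = 5;
  static final long SLEEP_INTERVAL_MS = 200;
  static final long MAX_SLEEP_MS = 5000;

  // Stand-in for the real filesystem delete; fails twice so the retry loop is exercised.
  static int calls = 0;
  static void deleteLockFile() throws IOException {
    if (++calls < 3) {
      throw new IOException("simulated transient failure");
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long sleep = SLEEP_INTERVAL_MS;
    for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
      try {
        deleteLockFile();
        System.out.println("deleted on attempt " + attempt);
        return;
      } catch (IOException ioe) {
        System.out.println("attempt " + attempt + " of " + MAX_ATTEMPTS + " failed: "
            + ioe.getMessage());
        Thread.sleep(sleep);                       // wait before the next try
        sleep = Math.min(sleep * 2, MAX_SLEEP_MS); // grow the wait, but cap it
      }
    }
    System.out.println("giving up after " + MAX_ATTEMPTS + " attempts");
  }
}

The hunk does not show how RetryCounter spaces its sleeps, so the doubling-with-cap arithmetic above is illustrative only; the bounded attempt count and per-attempt logging are the parts taken directly from the code shown.
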
[25/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index 7df71bd..a990153 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -201,3634 +201,3643 @@
 <span class="sourceLineNo">193</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.193"></a>
 <span class="sourceLineNo">194</span>import org.apache.hadoop.hbase.util.CompressionTest;<a name="line.194"></a>
 <span class="sourceLineNo">195</span>import org.apache.hadoop.hbase.util.EncryptionTest;<a name="line.195"></a>
-<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.196"></a>
-<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.197"></a>
-<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.199"></a>
-<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.200"></a>
-<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.201"></a>
-<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.203"></a>
-<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.204"></a>
-<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.206"></a>
-<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.210"></a>
-<span class="sourceLineNo">211</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.211"></a>
-<span class="sourceLineNo">212</span>import org.apache.zookeeper.KeeperException;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>import org.eclipse.jetty.server.Server;<a name="line.213"></a>
-<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.215"></a>
-<span class="sourceLineNo">216</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>import org.slf4j.Logger;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>import org.slf4j.LoggerFactory;<a name="line.218"></a>
-<span class="sourceLineNo">219</span><a name="line.219"></a>
-<span class="sourceLineNo">220</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.222"></a>
-<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.223"></a>
-<span class="sourceLineNo">224</span><a name="line.224"></a>
-<span class="sourceLineNo">225</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.226"></a>
-<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.227"></a>
-<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.230"></a>
-<span class="sourceLineNo">231</span><a name="line.231"></a>
-<span class="sourceLineNo">232</span>/**<a name="line.232"></a>
-<span class="sourceLineNo">233</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.233"></a>
-<span class="sourceLineNo">234</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.234"></a>
-<span class="sourceLineNo">235</span> * run the cluster.  All others park themselves in their constructor until<a name="line.235"></a>
-<span class="sourceLineNo">236</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.236"></a>
-<span class="sourceLineNo">237</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.237"></a>
-<span class="sourceLineNo">238</span> *<a name="line.238"></a>
-<span class="sourceLineNo">239</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.239"></a>
-<span class="sourceLineNo">240</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.240"></a>
-<span class="sourceLineNo">241</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.241"></a>
-<span class="sourceLineNo">242</span> *<a name="line.242"></a>
-<span class="sourceLineNo">243</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.243"></a>
-<span class="sourceLineNo">244</span> *<a name="line.244"></a>
-<span class="sourceLineNo">245</span> * @see org.apache.zookeeper.Watcher<a name="line.245"></a>
-<span class="sourceLineNo">246</span> */<a name="line.246"></a>
-<span class="sourceLineNo">247</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.247"></a>
-<span class="sourceLineNo">248</span>@SuppressWarnings("deprecation")<a name="line.248"></a>
-<span class="sourceLineNo">249</span>public class HMaster extends HRegionServer implements MasterServices {<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private static Logger LOG = LoggerFactory.getLogger(HMaster.class);<a name="line.250"></a>
-<span class="sourceLineNo">251</span><a name="line.251"></a>
-<span class="sourceLineNo">252</span>  /**<a name="line.252"></a>
-<span class="sourceLineNo">253</span>   * Protection against zombie master. Started once Master accepts active responsibility and<a name="line.253"></a>
-<span class="sourceLineNo">254</span>   * starts taking over responsibilities. Allows a finite time window before giving up ownership.<a name="line.254"></a>
-<span class="sourceLineNo">255</span>   */<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private static class InitializationMonitor extends HasThread {<a name="line.256"></a>
-<span class="sourceLineNo">257</span>    /** The amount of time in milliseconds to sleep before checking initialization status. */<a name="line.257"></a>
-<span class="sourceLineNo">258</span>    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";<a name="line.258"></a>
-<span class="sourceLineNo">259</span>    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);<a name="line.259"></a>
-<span class="sourceLineNo">260</span><a name="line.260"></a>
-<span class="sourceLineNo">261</span>    /**<a name="line.261"></a>
-<span class="sourceLineNo">262</span>     * When timeout expired and initialization has not complete, call {@link System#exit(int)} when<a name="line.262"></a>
-<span class="sourceLineNo">263</span>     * true, do nothing otherwise.<a name="line.263"></a>
-<span class="sourceLineNo">264</span>     */<a name="line.264"></a>
-<span class="sourceLineNo">265</span>    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";<a name="line.265"></a>
-<span class="sourceLineNo">266</span>    public static final boolean HALT_DEFAULT = false;<a name="line.266"></a>
-<span class="sourceLineNo">267</span><a name="line.267"></a>
-<span class="sourceLineNo">268</span>    private final HMaster master;<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    private final long timeout;<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    private final boolean haltOnTimeout;<a name="line.270"></a>
-<span class="sourceLineNo">271</span><a name="line.271"></a>
-<span class="sourceLineNo">272</span>    /** Creates a Thread that monitors the {@link #isInitialized()} state. */<a name="line.272"></a>
-<span class="sourceLineNo">273</span>    InitializationMonitor(HMaster master) {<a name="line.273"></a>
-<span class="sourceLineNo">274</span>      super("MasterInitializationMonitor");<a name="line.274"></a>
-<span class="sourceLineNo">275</span>      this.master = master;<a name="line.275"></a>
-<span class="sourceLineNo">276</span>      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);<a name="line.276"></a>
-<span class="sourceLineNo">277</span>      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);<a name="line.277"></a>
-<span class="sourceLineNo">278</span>      this.setDaemon(true);<a name="line.278"></a>
-<span class="sourceLineNo">279</span>    }<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>    @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>    public void run() {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>      try {<a name="line.283"></a>
-<span class="sourceLineNo">284</span>        while (!master.isStopped() &amp;&amp; master.isActiveMaster()) {<a name="line.284"></a>
-<span class="sourceLineNo">285</span>          Thread.sleep(timeout);<a name="line.285"></a>
-<span class="sourceLineNo">286</span>          if (master.isInitialized()) {<a name="line.286"></a>
-<span class="sourceLineNo">287</span>            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");<a name="line.287"></a>
-<span class="sourceLineNo">288</span>          } else {<a name="line.288"></a>
-<span class="sourceLineNo">289</span>            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"<a name="line.289"></a>
-<span class="sourceLineNo">290</span>                + " consider submitting a bug report including a thread dump of this process.");<a name="line.290"></a>
-<span class="sourceLineNo">291</span>            if (haltOnTimeout) {<a name="line.291"></a>
-<span class="sourceLineNo">292</span>              LOG.error("Zombie Master exiting. Thread dump to stdout");<a name="line.292"></a>
-<span class="sourceLineNo">293</span>              Threads.printThreadInfo(System.out, "Zombie HMaster");<a name="line.293"></a>
-<span class="sourceLineNo">294</span>              System.exit(-1);<a name="line.294"></a>
-<span class="sourceLineNo">295</span>            }<a name="line.295"></a>
-<span class="sourceLineNo">296</span>          }<a name="line.296"></a>
-<span class="sourceLineNo">297</span>        }<a name="line.297"></a>
-<span class="sourceLineNo">298</span>      } catch (InterruptedException ie) {<a name="line.298"></a>
-<span class="sourceLineNo">299</span>        LOG.trace("InitMonitor thread interrupted. Existing.");<a name="line.299"></a>
-<span class="sourceLineNo">300</span>      }<a name="line.300"></a>
-<span class="sourceLineNo">301</span>    }<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  }<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  // MASTER is name of the webapp and the attribute name used stuffing this<a name="line.304"></a>
-<span class="sourceLineNo">305</span>  //instance into web context.<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  public static final String MASTER = "master";<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  // Manager and zk listener for master election<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private final ActiveMasterManager activeMasterManager;<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Region server tracker<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private RegionServerTracker regionServerTracker;<a name="line.311"></a>
-<span class="sourceLineNo">312</span>  // Draining region server tracker<a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private DrainingServerTracker drainingServerTracker;<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  // Tracker for load balancer state<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  LoadBalancerTracker loadBalancerTracker;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  // Tracker for meta location, if any client ZK quorum specified<a name="line.316"></a>
-<span class="sourceLineNo">317</span>  MetaLocationSyncer metaLocationSyncer;<a name="line.317"></a>
-<span class="sourceLineNo">318</span>  // Tracker for active master location, if any client ZK quorum specified<a name="line.318"></a>
-<span class="sourceLineNo">319</span>  MasterAddressSyncer masterAddressSyncer;<a name="line.319"></a>
-<span class="sourceLineNo">320</span><a name="line.320"></a>
-<span class="sourceLineNo">321</span>  // Tracker for split and merge state<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private SplitOrMergeTracker splitOrMergeTracker;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  // Tracker for region normalizer state<a name="line.324"></a>
-<span class="sourceLineNo">325</span>  private RegionNormalizerTracker regionNormalizerTracker;<a name="line.325"></a>
-<span class="sourceLineNo">326</span><a name="line.326"></a>
-<span class="sourceLineNo">327</span>  //Tracker for master maintenance mode setting<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private MasterMaintenanceModeTracker maintenanceModeTracker;<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private ClusterSchemaService clusterSchemaService;<a name="line.330"></a>
-<span class="sourceLineNo">331</span><a name="line.331"></a>
-<span class="sourceLineNo">332</span>  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    "hbase.master.wait.on.service.seconds";<a name="line.333"></a>
-<span class="sourceLineNo">334</span>  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;<a name="line.334"></a>
-<span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  // Metrics for the HMaster<a name="line.336"></a>
-<span class="sourceLineNo">337</span>  final MetricsMaster metricsMaster;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  // file system manager for the master FS operations<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private MasterFileSystem fileSystemManager;<a name="line.339"></a>
-<span class="sourceLineNo">340</span>  private MasterWalManager walManager;<a name="line.340"></a>
-<span class="sourceLineNo">341</span><a name="line.341"></a>
-<span class="sourceLineNo">342</span>  // server manager to deal with region server info<a name="line.342"></a>
-<span class="sourceLineNo">343</span>  private volatile ServerManager serverManager;<a name="line.343"></a>
-<span class="sourceLineNo">344</span><a name="line.344"></a>
-<span class="sourceLineNo">345</span>  // manager of assignment nodes in zookeeper<a name="line.345"></a>
-<span class="sourceLineNo">346</span>  private AssignmentManager assignmentManager;<a name="line.346"></a>
-<span class="sourceLineNo">347</span><a name="line.347"></a>
-<span class="sourceLineNo">348</span>  // manager of replication<a name="line.348"></a>
-<span class="sourceLineNo">349</span>  private ReplicationPeerManager replicationPeerManager;<a name="line.349"></a>
-<span class="sourceLineNo">350</span><a name="line.350"></a>
-<span class="sourceLineNo">351</span>  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;<a name="line.351"></a>
-<span class="sourceLineNo">352</span><a name="line.352"></a>
-<span class="sourceLineNo">353</span>  // buffer for "fatal error" notices from region servers<a name="line.353"></a>
-<span class="sourceLineNo">354</span>  // in the cluster. This is only used for assisting<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  // operations/debugging.<a name="line.355"></a>
-<span class="sourceLineNo">356</span>  MemoryBoundedLogMessageBuffer rsFatals;<a name="line.356"></a>
-<span class="sourceLineNo">357</span><a name="line.357"></a>
-<span class="sourceLineNo">358</span>  // flag set after we become the active master (used for testing)<a name="line.358"></a>
-<span class="sourceLineNo">359</span>  private volatile boolean activeMaster = false;<a name="line.359"></a>
-<span class="sourceLineNo">360</span><a name="line.360"></a>
-<span class="sourceLineNo">361</span>  // flag set after we complete initialization once active<a name="line.361"></a>
-<span class="sourceLineNo">362</span>  private final ProcedureEvent&lt;?&gt; initialized = new ProcedureEvent&lt;&gt;("master initialized");<a name="line.362"></a>
-<span class="sourceLineNo">363</span><a name="line.363"></a>
-<span class="sourceLineNo">364</span>  // flag set after master services are started,<a name="line.364"></a>
-<span class="sourceLineNo">365</span>  // initialization may have not completed yet.<a name="line.365"></a>
-<span class="sourceLineNo">366</span>  volatile boolean serviceStarted = false;<a name="line.366"></a>
-<span class="sourceLineNo">367</span><a name="line.367"></a>
-<span class="sourceLineNo">368</span>  // Maximum time we should run balancer for<a name="line.368"></a>
-<span class="sourceLineNo">369</span>  private final int maxBlancingTime;<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  // Maximum percent of regions in transition when balancing<a name="line.370"></a>
-<span class="sourceLineNo">371</span>  private final double maxRitPercent;<a name="line.371"></a>
-<span class="sourceLineNo">372</span><a name="line.372"></a>
-<span class="sourceLineNo">373</span>  private final LockManager lockManager = new LockManager(this);<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>  private LoadBalancer balancer;<a name="line.375"></a>
-<span class="sourceLineNo">376</span>  private RegionNormalizer normalizer;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>  private BalancerChore balancerChore;<a name="line.377"></a>
-<span class="sourceLineNo">378</span>  private RegionNormalizerChore normalizerChore;<a name="line.378"></a>
-<span class="sourceLineNo">379</span>  private ClusterStatusChore clusterStatusChore;<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  private ClusterStatusPublisher clusterStatusPublisherChore = null;<a name="line.380"></a>
-<span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  CatalogJanitor catalogJanitorChore;<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  private LogCleaner logCleaner;<a name="line.383"></a>
-<span class="sourceLineNo">384</span>  private HFileCleaner hfileCleaner;<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  private ReplicationBarrierCleaner replicationBarrierCleaner;<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  private MobCompactionChore mobCompactChore;<a name="line.387"></a>
-<span class="sourceLineNo">388</span>  private MasterMobCompactionThread mobCompactThread;<a name="line.388"></a>
-<span class="sourceLineNo">389</span>  // used to synchronize the mobCompactionStates<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  private final IdLock mobCompactionLock = new IdLock();<a name="line.390"></a>
-<span class="sourceLineNo">391</span>  // save the information of mob compactions in tables.<a name="line.391"></a>
-<span class="sourceLineNo">392</span>  // the key is table name, the value is the number of compactions in that table.<a name="line.392"></a>
-<span class="sourceLineNo">393</span>  private Map&lt;TableName, AtomicInteger&gt; mobCompactionStates = Maps.newConcurrentMap();<a name="line.393"></a>
-<span class="sourceLineNo">394</span><a name="line.394"></a>
-<span class="sourceLineNo">395</span>  MasterCoprocessorHost cpHost;<a name="line.395"></a>
-<span class="sourceLineNo">396</span><a name="line.396"></a>
-<span class="sourceLineNo">397</span>  private final boolean preLoadTableDescriptors;<a name="line.397"></a>
-<span class="sourceLineNo">398</span><a name="line.398"></a>
-<span class="sourceLineNo">399</span>  // Time stamps for when a hmaster became active<a name="line.399"></a>
-<span class="sourceLineNo">400</span>  private long masterActiveTime;<a name="line.400"></a>
-<span class="sourceLineNo">401</span><a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // Time stamp for when HMaster finishes becoming Active Master<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  private long masterFinishedInitializationTime;<a name="line.403"></a>
-<span class="sourceLineNo">404</span><a name="line.404"></a>
-<span class="sourceLineNo">405</span>  //should we check the compression codec type at master side, default true, HBASE-6370<a name="line.405"></a>
-<span class="sourceLineNo">406</span>  private final boolean masterCheckCompression;<a name="line.406"></a>
-<span class="sourceLineNo">407</span><a name="line.407"></a>
-<span class="sourceLineNo">408</span>  //should we check encryption settings at master side, default true<a name="line.408"></a>
-<span class="sourceLineNo">409</span>  private final boolean masterCheckEncryption;<a name="line.409"></a>
-<span class="sourceLineNo">410</span><a name="line.410"></a>
-<span class="sourceLineNo">411</span>  Map&lt;String, Service&gt; coprocessorServiceHandlers = Maps.newHashMap();<a name="line.411"></a>
-<span class="sourceLineNo">412</span><a name="line.412"></a>
-<span class="sourceLineNo">413</span>  // monitor for snapshot of hbase tables<a name="line.413"></a>
-<span class="sourceLineNo">414</span>  SnapshotManager snapshotManager;<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  // monitor for distributed procedures<a name="line.415"></a>
-<span class="sourceLineNo">416</span>  private MasterProcedureManagerHost mpmHost;<a name="line.416"></a>
-<span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  // it is assigned after 'initialized' guard set to true, so should be volatile<a name="line.418"></a>
-<span class="sourceLineNo">419</span>  private volatile MasterQuotaManager quotaManager;<a name="line.419"></a>
-<span class="sourceLineNo">420</span>  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  private QuotaObserverChore quotaObserverChore;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  private SnapshotQuotaObserverChore snapshotQuotaChore;<a name="line.422"></a>
-<span class="sourceLineNo">423</span><a name="line.423"></a>
-<span class="sourceLineNo">424</span>  private ProcedureExecutor&lt;MasterProcedureEnv&gt; procedureExecutor;<a name="line.424"></a>
-<span class="sourceLineNo">425</span>  private WALProcedureStore procedureStore;<a name="line.425"></a>
-<span class="sourceLineNo">426</span><a name="line.426"></a>
-<span class="sourceLineNo">427</span>  // handle table states<a name="line.427"></a>
-<span class="sourceLineNo">428</span>  private TableStateManager tableStateManager;<a name="line.428"></a>
-<span class="sourceLineNo">429</span><a name="line.429"></a>
-<span class="sourceLineNo">430</span>  private long splitPlanCount;<a name="line.430"></a>
-<span class="sourceLineNo">431</span>  private long mergePlanCount;<a name="line.431"></a>
-<span class="sourceLineNo">432</span><a name="line.432"></a>
-<span class="sourceLineNo">433</span>  /* Handle favored nodes information */<a name="line.433"></a>
-<span class="sourceLineNo">434</span>  private FavoredNodesManager favoredNodesManager;<a name="line.434"></a>
-<span class="sourceLineNo">435</span><a name="line.435"></a>
-<span class="sourceLineNo">436</span>  /** jetty server for master to redirect requests to regionserver infoServer */<a name="line.436"></a>
-<span class="sourceLineNo">437</span>  private Server masterJettyServer;<a name="line.437"></a>
-<span class="sourceLineNo">438</span><a name="line.438"></a>
-<span class="sourceLineNo">439</span>  public static class RedirectServlet extends HttpServlet {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    private static final long serialVersionUID = 2894774810058302473L;<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    private final int regionServerInfoPort;<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    private final String regionServerHostname;<a name="line.442"></a>
-<span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>    /**<a name="line.444"></a>
-<span class="sourceLineNo">445</span>     * @param infoServer that we're trying to send all requests to<a name="line.445"></a>
-<span class="sourceLineNo">446</span>     * @param hostname may be null. if given, will be used for redirects instead of host from client.<a name="line.446"></a>
-<span class="sourceLineNo">447</span>     */<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    public RedirectServlet(InfoServer infoServer, String hostname) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>       regionServerInfoPort = infoServer.getPort();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>       regionServerHostname = hostname;<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    }<a name="line.451"></a>
-<span class="sourceLineNo">452</span><a name="line.452"></a>
-<span class="sourceLineNo">453</span>    @Override<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    public void doGet(HttpServletRequest request,<a name="line.454"></a>
-<span class="sourceLineNo">455</span>        HttpServletResponse response) throws ServletException, IOException {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      String redirectHost = regionServerHostname;<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      if(redirectHost == null) {<a name="line.457"></a>
-<span class="sourceLineNo">458</span>        redirectHost = request.getServerName();<a name="line.458"></a>
-<span class="sourceLineNo">459</span>        if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {<a name="line.459"></a>
-<span class="sourceLineNo">460</span>          LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +<a name="line.460"></a>
-<span class="sourceLineNo">461</span>              MASTER_HOSTNAME_KEY + "' is not set; client will get a HTTP 400 response. If " +<a name="line.461"></a>
-<span class="sourceLineNo">462</span>              "your HBase deployment relies on client accessible names that the region server process " +<a name="line.462"></a>
-<span class="sourceLineNo">463</span>              "can't resolve locally, then you should set the previously mentioned configuration variable " +<a name="line.463"></a>
-<span class="sourceLineNo">464</span>              "to an appropriate hostname.");<a name="line.464"></a>
-<span class="sourceLineNo">465</span>          // no sending client provided input back to the client, so the goal host is just in the logs.<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +<a name="line.466"></a>
-<span class="sourceLineNo">467</span>              "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          return;<a name="line.469"></a>
-<span class="sourceLineNo">470</span>        }<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      }<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      // host and port we're using, but it's buried way too deep to do that ATM.<a name="line.473"></a>
-<span class="sourceLineNo">474</span>      String redirectUrl = request.getScheme() + "://"<a name="line.474"></a>
-<span class="sourceLineNo">475</span>        + redirectHost + ":" + regionServerInfoPort<a name="line.475"></a>
-<span class="sourceLineNo">476</span>        + request.getRequestURI();<a name="line.476"></a>
-<span class="sourceLineNo">477</span>      response.sendRedirect(redirectUrl);<a name="line.477"></a>
-<span class="sourceLineNo">478</span>    }<a name="line.478"></a>
-<span class="sourceLineNo">479</span>  }<a name="line.479"></a>
-<span class="sourceLineNo">480</span><a name="line.480"></a>
-<span class="sourceLineNo">481</span>  /**<a name="line.481"></a>
-<span class="sourceLineNo">482</span>   * Initializes the HMaster. The steps are as follows:<a name="line.482"></a>
-<span class="sourceLineNo">483</span>   * &lt;p&gt;<a name="line.483"></a>
-<span class="sourceLineNo">484</span>   * &lt;ol&gt;<a name="line.484"></a>
-<span class="sourceLineNo">485</span>   * &lt;li&gt;Initialize the local HRegionServer<a name="line.485"></a>
-<span class="sourceLineNo">486</span>   * &lt;li&gt;Start the ActiveMasterManager.<a name="line.486"></a>
-<span class="sourceLineNo">487</span>   * &lt;/ol&gt;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>   * &lt;p&gt;<a name="line.488"></a>
-<span class="sourceLineNo">489</span>   * Remaining steps of initialization occur in<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * #finishActiveMasterInitialization(MonitoredTask) after<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * the master becomes the active one.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public HMaster(final Configuration conf)<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      throws IOException, KeeperException {<a name="line.494"></a>
-<span class="sourceLineNo">495</span>    super(conf);<a name="line.495"></a>
-<span class="sourceLineNo">496</span>    TraceUtil.initTracer(conf);<a name="line.496"></a>
-<span class="sourceLineNo">497</span>    try {<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      this.rsFatals = new MemoryBoundedLogMessageBuffer(<a name="line.498"></a>
-<span class="sourceLineNo">499</span>          conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      LOG.info("hbase.rootdir=" + getRootDir() +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>          ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));<a name="line.501"></a>
-<span class="sourceLineNo">502</span><a name="line.502"></a>
-<span class="sourceLineNo">503</span>      // Disable usage of meta replicas in the master<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      decorateMasterConfiguration(this.conf);<a name="line.506"></a>
-<span class="sourceLineNo">507</span><a name="line.507"></a>
-<span class="sourceLineNo">508</span>      // Hack! Maps DFSClient =&gt; Master for logs.  HDFS made this<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      // config param for task trackers, but we can piggyback off of it.<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      if (this.conf.get("mapreduce.task.attempt.id") == null) {<a name="line.510"></a>
-<span class="sourceLineNo">511</span>        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      }<a name="line.512"></a>
-<span class="sourceLineNo">513</span><a name="line.513"></a>
-<span class="sourceLineNo">514</span>      // should we check the compression codec type at master side, default true, HBASE-6370<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);<a name="line.515"></a>
-<span class="sourceLineNo">516</span><a name="line.516"></a>
-<span class="sourceLineNo">517</span>      // should we check encryption settings at master side, default true<a name="line.517"></a>
-<span class="sourceLineNo">518</span>      this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);<a name="line.518"></a>
-<span class="sourceLineNo">519</span><a name="line.519"></a>
-<span class="sourceLineNo">520</span>      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));<a name="line.520"></a>
-<span class="sourceLineNo">521</span><a name="line.521"></a>
-<span class="sourceLineNo">522</span>      // preload table descriptor at startup<a name="line.522"></a>
-<span class="sourceLineNo">523</span>      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>      this.maxBlancingTime = getMaxBalancingTime();<a name="line.525"></a>
-<span class="sourceLineNo">526</span>      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,<a name="line.526"></a>
-<span class="sourceLineNo">527</span>          HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);<a name="line.527"></a>
-<span class="sourceLineNo">528</span><a name="line.528"></a>
-<span class="sourceLineNo">529</span>      // Do we publish the status?<a name="line.529"></a>
-<span class="sourceLineNo">530</span><a name="line.530"></a>
-<span class="sourceLineNo">531</span>      boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,<a name="line.531"></a>
-<span class="sourceLineNo">532</span>          HConstants.STATUS_PUBLISHED_DEFAULT);<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      Class&lt;? extends ClusterStatusPublisher.Publisher&gt; publisherClass =<a name="line.533"></a>
-<span class="sourceLineNo">534</span>          conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,<a name="line.534"></a>
-<span class="sourceLineNo">535</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,<a name="line.535"></a>
-<span class="sourceLineNo">536</span>              ClusterStatusPublisher.Publisher.class);<a name="line.536"></a>
-<span class="sourceLineNo">537</span><a name="line.537"></a>
-<span class="sourceLineNo">538</span>      if (shouldPublish) {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        if (publisherClass == null) {<a name="line.539"></a>
-<span class="sourceLineNo">540</span>          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +<a name="line.540"></a>
-<span class="sourceLineNo">541</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +<a name="line.541"></a>
-<span class="sourceLineNo">542</span>              " is not set - not publishing status");<a name="line.542"></a>
-<span class="sourceLineNo">543</span>        } else {<a name="line.543"></a>
-<span class="sourceLineNo">544</span>          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>          getChoreService().scheduleChore(clusterStatusPublisherChore);<a name="line.545"></a>
-<span class="sourceLineNo">546</span>        }<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      }<a name="line.547"></a>
-<span class="sourceLineNo">548</span><a name="line.548"></a>
-<span class="sourceLineNo">549</span>      // Some unit tests don't need a cluster, so no zookeeper at all<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      } else {<a name="line.552"></a>
-<span class="sourceLineNo">553</span>        this.activeMasterManager = null;<a name="line.553"></a>
-<span class="sourceLineNo">554</span>      }<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    } catch (Throwable t) {<a name="line.555"></a>
-<span class="sourceLineNo">556</span>      // Make sure we log the exception. HMaster is often started via reflection and the<a name="line.556"></a>
-<span class="sourceLineNo">557</span>      // cause of failed startup is lost.<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      LOG.error("Failed construction of Master", t);<a name="line.558"></a>
-<span class="sourceLineNo">559</span>      throw t;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>  }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>  @Override<a name="line.563"></a>
-<span class="sourceLineNo">564</span>  protected String getUseThisHostnameInstead(Configuration conf) {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    return conf.get(MASTER_HOSTNAME_KEY);<a name="line.565"></a>
-<span class="sourceLineNo">566</span>  }<a name="line.566"></a>
-<span class="sourceLineNo">567</span><a name="line.567"></a>
-<span class="sourceLineNo">568</span>  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  // block in here until then.<a name="line.569"></a>
-<span class="sourceLineNo">570</span>  @Override<a name="line.570"></a>
-<span class="sourceLineNo">571</span>  public void run() {<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    try {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>        Threads.setDaemonThreadRunning(new Thread(() -&gt; {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>          try {<a name="line.575"></a>
-<span class="sourceLineNo">576</span>            int infoPort = putUpJettyServer();<a name="line.576"></a>
-<span class="sourceLineNo">577</span>            startActiveMasterManager(infoPort);<a name="line.577"></a>
-<span class="sourceLineNo">578</span>          } catch (Throwable t) {<a name="line.578"></a>
-<span class="sourceLineNo">579</span>            // Make sure we log the exception.<a name="line.579"></a>
-<span class="sourceLineNo">580</span>            String error = "Failed to become Active Master";<a name="line.580"></a>
-<span class="sourceLineNo">581</span>            LOG.error(error, t);<a name="line.581"></a>
-<span class="sourceLineNo">582</span>            // Abort should have been called already.<a name="line.582"></a>
-<span class="sourceLineNo">583</span>            if (!isAborted()) {<a name="line.583"></a>
-<span class="sourceLineNo">584</span>              abort(error, t);<a name="line.584"></a>
-<span class="sourceLineNo">585</span>            }<a name="line.585"></a>
-<span class="sourceLineNo">586</span>          }<a name="line.586"></a>
-<span class="sourceLineNo">587</span>        }));<a name="line.587"></a>
-<span class="sourceLineNo">588</span>      }<a name="line.588"></a>
-<span class="sourceLineNo">589</span>      // Fall in here even if we have been aborted. Need to run the shutdown services and<a name="line.589"></a>
-<span class="sourceLineNo">590</span>      // the super run call will do this for us.<a name="line.590"></a>
-<span class="sourceLineNo">591</span>      super.run();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    } finally {<a name="line.592"></a>
-<span class="sourceLineNo">593</span>      if (this.clusterSchemaService != null) {<a name="line.593"></a>
-<span class="sourceLineNo">594</span>        // If on way out, then we are no longer active master.<a name="line.594"></a>
-<span class="sourceLineNo">595</span>        this.clusterSchemaService.stopAsync();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>        try {<a name="line.596"></a>
-<span class="sourceLineNo">597</span>          this.clusterSchemaService.awaitTerminated(<a name="line.597"></a>
-<span class="sourceLineNo">598</span>              getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,<a name="line.598"></a>
-<span class="sourceLineNo">599</span>              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);<a name="line.599"></a>
-<span class="sourceLineNo">600</span>        } catch (TimeoutException te) {<a name="line.600"></a>
-<span class="sourceLineNo">601</span>          LOG.warn("Failed shutdown of clusterSchemaService", te);<a name="line.601"></a>
-<span class="sourceLineNo">602</span>        }<a name="line.602"></a>
-<span class="sourceLineNo">603</span>      }<a name="line.603"></a>
-<span class="sourceLineNo">604</span>      this.activeMaster = false;<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    }<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  }<a name="line.606"></a>
-<span class="sourceLineNo">607</span><a name="line.607"></a>
-<span class="sourceLineNo">608</span>  // return the actual infoPort, -1 means disable info server.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>  private int putUpJettyServer() throws IOException {<a name="line.609"></a>
-<span class="sourceLineNo">610</span>    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      return -1;<a name="line.611"></a>
-<span class="sourceLineNo">612</span>    }<a name="line.612"></a>
-<span class="sourceLineNo">613</span>    final int infoPort = conf.getInt("hbase.master.info.port.orig",<a name="line.613"></a>
-<span class="sourceLineNo">614</span>      HConstants.DEFAULT_MASTER_INFOPORT);<a name="line.614"></a>
-<span class="sourceLineNo">615</span>    // -1 is for disabling info server, so no redirecting<a name="line.615"></a>
-<span class="sourceLineNo">616</span>    if (infoPort &lt; 0 || infoServer == null) {<a name="line.616"></a>
-<span class="sourceLineNo">617</span>      return -1;<a name="line.617"></a>
-<span class="sourceLineNo">618</span>    }<a name="line.618"></a>
-<span class="sourceLineNo">619</span>    if(infoPort == infoServer.getPort()) {<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      return infoPort;<a name="line.620"></a>
-<span class="sourceLineNo">621</span>    }<a name="line.621"></a>
-<span class="sourceLineNo">622</span>    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");<a name="line.622"></a>
-<span class="sourceLineNo">623</span>    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {<a name="line.623"></a>
-<span class="sourceLineNo">624</span>      String msg =<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          "Failed to start redirecting jetty server. Address " + addr<a name="line.625"></a>
-<span class="sourceLineNo">626</span>              + " does not belong to this host. Correct configuration parameter: "<a name="line.626"></a>
-<span class="sourceLineNo">627</span>              + "hbase.master.info.bindAddress";<a name="line.627"></a>
-<span class="sourceLineNo">628</span>      LOG.error(msg);<a name="line.628"></a>
-<span class="sourceLineNo">629</span>      throw new IOException(msg);<a name="line.629"></a>
-<span class="sourceLineNo">630</span>    }<a name="line.630"></a>
-<span class="sourceLineNo">631</span><a name="line.631"></a>
-<span class="sourceLineNo">632</span>    // TODO I'm pretty sure we could just add another binding to the InfoServer run by<a name="line.632"></a>
-<span class="sourceLineNo">633</span>    // the RegionServer and have it run the RedirectServlet instead of standing up<a name="line.633"></a>
-<span class="sourceLineNo">634</span>    // a second entire stack here.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>    masterJettyServer = new Server();<a name="line.635"></a>
-<span class="sourceLineNo">636</span>    final ServerConnector connector = new ServerConnector(masterJettyServer);<a name="line.636"></a>
-<span class="sourceLineNo">637</span>    connector.setHost(addr);<a name="line.637"></a>
-<span class="sourceLineNo">638</span>    connector.setPort(infoPort);<a name="line.638"></a>
-<span class="sourceLineNo">639</span>    masterJettyServer.addConnector(connector);<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    masterJettyServer.setStopAtShutdown(true);<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    final String redirectHostname =<a name="line.642"></a>
-<span class="sourceLineNo">643</span>        StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;<a name="line.643"></a>
-<span class="sourceLineNo">644</span><a name="line.644"></a>
-<span class="sourceLineNo">645</span>    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    context.addServlet(new ServletHolder(redirect), "/*");<a name="line.647"></a>
-<span class="sourceLineNo">648</span>    context.setServer(masterJettyServer);<a name="line.648"></a>
-<span class="sourceLineNo">649</span><a name="line.649"></a>
-<span class="sourceLineNo">650</span>    try {<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      masterJettyServer.start();<a name="line.651"></a>
-<span class="sourceLineNo">652</span>    } catch (Exception e) {<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      throw new IOException("Failed to start redirecting jetty server", e);<a name="line.653"></a>
-<span class="sourceLineNo">654</span>    }<a name="line.654"></a>
-<span class="sourceLineNo">655</span>    return connector.getLocalPort();<a name="line.655"></a>
-<span class="sourceLineNo">656</span>  }<a name="line.656"></a>
-<span class="sourceLineNo">657</span><a name="line.657"></a>
-<span class="sourceLineNo">658</span>  @Override<a name="line.658"></a>
-<span class="sourceLineNo">659</span>  protected Function&lt;TableDescriptorBuilder, TableDescriptorBuilder&gt; getMetaTableObserver() {<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    return builder -&gt; builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));<a name="line.660"></a>
-<span class="sourceLineNo">661</span>  }<a name="line.661"></a>
-<span class="sourceLineNo">662</span>  /**<a name="line.662"></a>
-<span class="sourceLineNo">663</span>   * For compatibility, if failed with regionserver credentials, try the master one<a name="line.663"></a>
-<span class="sourceLineNo">664</span>   */<a name="line.664"></a>
-<span class="sourceLineNo">665</span>  @Override<a name="line.665"></a>
-<span class="sourceLineNo">666</span>  protected void login(UserProvider user, String host) throws IOException {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>    try {<a name="line.667"></a>
-<span class="sourceLineNo">668</span>      super.login(user, host);<a name="line.668"></a>
-<span class="sourceLineNo">669</span>    } catch (IOException ie) {<a name="line.669"></a>
-<span class="sourceLineNo">670</span>      user.login("hbase.master.keytab.file",<a name="line.670"></a>
-<span class="sourceLineNo">671</span>        "hbase.master.kerberos.principal", host);<a name="line.671"></a>
-<span class="sourceLineNo">672</span>    }<a name="line.672"></a>
-<span class="sourceLineNo">673</span>  }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>  /**<a name="line.675"></a>
-<span class="sourceLineNo">676</span>   * If configured to put regions on active master,<a name="line.676"></a>
-<span class="sourceLineNo">677</span>   * wait till a backup master becomes active.<a name="line.677"></a>
-<span class="sourceLineNo">678</span>   * Otherwise, loop till the server is stopped or aborted.<a name="line.678"></a>
-<span class="sourceLineNo">679</span>   */<a name="line.679"></a>
-<span class="sourceLineNo">680</span>  @Override<a name="line.680"></a>
-<span class="sourceLineNo">681</span>  protected void waitForMasterActive(){<a name="line.681"></a>
-<span class="sourceLineNo">682</span>    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);<a name="line.682"></a>
-<span class="sourceLineNo">683</span>    while (!(tablesOnMaster &amp;&amp; activeMaster) &amp;&amp; !isStopped() &amp;&amp; !isAborted()) {<a name="line.683"></a>
-<span class="sourceLineNo">684</span>      sleeper.sleep();<a name="line.684"></a>
-<span class="sourceLineNo">685</span>    }<a name="line.685"></a>
-<span class="sourceLineNo">686</span>  }<a name="line.686"></a>
-<span class="sourceLineNo">687</span><a name="line.687"></a>
-<span class="sourceLineNo">688</span>  @VisibleForTesting<a name="line.688"></a>
-<span class="sourceLineNo">689</span>  public MasterRpcServices getMasterRpcServices() {<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    return (MasterRpcServices)rpcServices;<a name="line.690"></a>
-<span class="sourceLineNo">691</span>  }<a name="line.691"></a>
-<span class="sourceLineNo">692</span><a name="line.692"></a>
-<span class="sourceLineNo">693</span>  public boolean balanceSwitch(final boolean b) throws IOException {<a name="line.693"></a>
-<span class="sourceLineNo">694</span>    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);<a name="line.694"></a>
-<span class="sourceLineNo">695</span>  }<a name="line.695"></a>
-<span class="sourceLineNo">696</span><a name="line.696"></a>
-<span class="sourceLineNo">697</span>  @Override<a name="line.697"></a>
-<span class="sourceLineNo">698</span>  protected String getProcessName() {<a name="line.698"></a>
-<span class="sourceLineNo">699</span>    return MASTER;<a name="line.699"></a>
-<span class="sourceLineNo">700</span>  }<a name="line.700"></a>
-<span class="sourceLineNo">701</span><a name="line.701"></a>
-<span class="sourceLineNo">702</span>  @Override<a name="line.702"></a>
-<span class="sourceLineNo">703</span>  protected boolean canCreateBaseZNode() {<a name="line.703"></a>
-<span class="sourceLineNo">704</span>    return true;<a name="line.704"></a>
-<span class="sourceLineNo">705</span>  }<a name="line.705"></a>
-<span class="sourceLineNo">706</span><a name="line.706"></a>
-<span class="sourceLineNo">707</span>  @Override<a name="line.707"></a>
-<span class="sourceLineNo">708</span>  protected boolean canUpdateTableDescriptor() {<a name="line.708"></a>
-<span class="sourceLineNo">709</span>    return true;<a name="line.709"></a>
-<span class="sourceLineNo">710</span>  }<a name="line.710"></a>
-<span class="sourceLineNo">711</span><a name="line.711"></a>
-<span class="sourceLineNo">712</span>  @Override<a name="line.712"></a>
-<span class="sourceLineNo">713</span>  protected RSRpcServices createRpcServices() throws IOException {<a name="line.713"></a>
-<span class="sourceLineNo">714</span>    return new MasterRpcServices(this);<a name="line.714"></a>
-<span class="sourceLineNo">715</span>  }<a name="line.715"></a>
-<span class="sourceLineNo">716</span><a name="line.716"></a>
-<span class="sourceLineNo">717</span>  @Override<a name="line.717"></a>
-<span class="sourceLineNo">718</span>  protected void configureInfoServer() {<a name="line.718"></a>
-<span class="sourceLineNo">719</span>    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);<a name="line.719"></a>
-<span class="sourceLineNo">720</span>    infoServer.setAttribute(MASTER, this);<a name="line.720"></a>
-<span class="sourceLineNo">721</span>    if (LoadBalancer.isTablesOnMaster(conf)) {<a name="line.721"></a>
-<span class="sourceLineNo">722</span>      super.configureInfoServer();<a name="line.722"></a>
-<span class="sourceLineNo">723</span>    }<a name="line.723"></a>
-<span class="sourceLineNo">724</span>  }<a name="line.724"></a>
-<span class="sourceLineNo">725</span><a name="line.725"></a>
-<span class="sourceLineNo">726</span>  @Override<a name="line.726"></a>
-<span class="sourceLineNo">727</span>  protected Class&lt;? extends HttpServlet&gt; getDumpServlet() {<a name="line.727"></a>
-<span class="sourceLineNo">728</span>    return MasterDumpServlet.class;<a name="line.728"></a>
-<span class="sourceLineNo">729</span>  }<a name="line.729"></a>
-<span class="sourceLineNo">730</span><a name="line.730"></a>
-<span class="sourceLineNo">731</span>  @Override<a name="line.731"></a>
-<span class="sourceLineNo">732</span>  public MetricsMaster getMasterMetrics() {<a name="line.732"></a>
-<span class="sourceLineNo">733</span>    return metricsMaster;<a name="line.733"></a>
-<span class="sourceLineNo">734</span>  }<a name="line.734"></a>
-<span class="sourceLineNo">735</span><a name="line.735"></a>
-<span class="sourceLineNo">736</span>  /**<a name="line.736"></a>
-<span class="sourceLineNo">737</span>   * &lt;p&gt;<a name="line.737"></a>
-<span class="sourceLineNo">738</span>   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it<a name="line.738"></a>
-<span class="sourceLineNo">739</span>   * should have already been initialized along with {@link ServerManager}.<a name="line.739"></a>
-<span class="sourceLineNo">740</span>   * &lt;/p&gt;<a name="line.740"></a>
-<span class="sourceLineNo">741</span>   * &lt;p&gt;<a name="line.741"></a>
-<span class="sourceLineNo">742</span>   * Will be overridden in tests.<a name="line.742"></a>
-<span class="sourceLineNo">743</span>   * &lt;/p&gt;<a name="line.743"></a>
-<span class="sourceLineNo">744</span>   */<a name="line.744"></a>
-<span class="sourceLineNo">745</span>  @VisibleForTesting<a name="line.745"></a>
-<span class="sourceLineNo">746</span>  protected void initializeZKBasedSystemTrackers()<a name="line.746"></a>
-<span class="sourceLineNo">747</span>      throws IOException, InterruptedException, KeeperException, ReplicationException {<a name="line.747"></a>
-<span class="sourceLineNo">748</span>    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);<a name="line.748"></a>
-<span class="sourceLineNo">749</span>    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);<a name="line.749"></a>
-<span class="sourceLineNo">750</span>    this.normalizer.setMasterServices(this);<a name="line.750"></a>
-<span class="sourceLineNo">751</span>    this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);<a name="line.751"></a>
-<span class="sourceLineNo">752</span>    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);<a name="line.752"></a>
-<span class="sourceLineNo">753</span>    this.loadBalancerTracker.start();<a name="line.753"></a>
-<span class="sourceLineNo">754</span><a name="line.754"></a>
-<span class="sourceLineNo">755</span>    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    this.regionNormalizerTracker.start();<a name="line.756"></a>
-<span class="sourceLineNo">757</span><a name="line.757"></a>
-<span class="sourceLineNo">758</span>    this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);<a name="line.758"></a>
-<span class="sourceLineNo">759</span>    this.splitOrMergeTracker.start();<a name="line.759"></a>
-<span class="sourceLineNo">760</span><a name="line.760"></a>
-<span class="sourceLineNo">761</span>    this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);<a name="line.761"></a>
-<span class="sourceLineNo">762</span>    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);<a name="line.762"></a>
-<span class="sourceLineNo">763</span><a name="line.763"></a>
-<span class="sourceLineNo">764</span>    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);<a name="line.764"></a>
-<span class="sourceLineNo">765</span>    this.drainingServerTracker.start();<a name="line.765"></a>
-<span class="sourceLineNo">766</span><a name="line.766"></a>
-<span class="sourceLineNo">767</span>    this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);<a name="line.767"></a>
-<span class="sourceLineNo">768</span>    this.maintenanceModeTracker.start();<a name="line.768"></a>
-<span class="sourceLineNo">769</span><a name="line.769"></a>
-<span class="sourceLineNo">770</span>    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);<a name="line.770"></a>
-<span class="sourceLineNo">771</span>    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,<a name="line.771"></a>
-<span class="sourceLineNo">772</span>      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);<a name="line.772"></a>
-<span class="sourceLineNo">773</span>    if (clientQuorumServers != null &amp;&amp; !clientZkObserverMode) {<a name="line.773"></a>
-<span class="sourceLineNo">774</span>      // we need to take care of the ZK information synchronization<a name="line.774"></a>
-<span class="sourceLineNo">775</span>      // if given client ZK are not observer nodes<a name="line.775"></a>
-<span class="sourceLineNo">776</span>      ZKWatcher clientZkWatcher = new ZKWatcher(conf,<a name="line.776"></a>
-<span class="sourceLineNo">777</span>          getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,<a name="line.777"></a>
-<span class="sourceLineNo">778</span>          false, true);<a name="line.778"></a>
-<span class="sourceLineNo">779</span>      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);<a name="line.779"></a>
-<span class="sourceLineNo">780</span>      this.metaLocationSyncer.start();<a name="line.780"></a>
-<span class="sourceLineNo">781</span>      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);<a name="line.781"></a>
-<span class="sourceLineNo">782</span>      this.masterAddressSyncer.start();<a name="line.782"></a>
-<span class="sourceLineNo">783</span>      // set cluster id is a one-go effort<a name="line.783"></a>
-<span class="sourceLineNo">784</span>      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());<a name="line.784"></a>
-<span class="sourceLineNo">785</span>    }<a name="line.785"></a>
-<span class="sourceLineNo">786</span><a name="line.786"></a>
-<span class="sourceLineNo">787</span>    // Set the cluster as up.  If new RSs, they'll be waiting on this before<a name="line.787"></a>
-<span class="sourceLineNo">788</span>    // going ahead with their startup.<a name="line.788"></a>
-<span class="sourceLineNo">789</span>    boolean wasUp = this.clusterStatusTracker.isClusterUp();<a name="line.789"></a>
-<span class="sourceLineNo">790</span>    if (!wasUp) this.clusterStatusTracker.setClusterUp();<a name="line.790"></a>
-<span class="sourceLineNo">791</span><a name="line.791"></a>
-<span class="sourceLineNo">792</span>    LOG.info("Active/primary master=" + this.serverName +<a name="line.792"></a>
-<span class="sourceLineNo">793</span>        ", sessionid=0x" +<a name="line.793"></a>
-<span class="sourceLineNo">794</span>        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +<a name="line.794"></a>
-<span class="sourceLineNo">795</span>        ", setting cluster-up flag (Was=" + wasUp + ")");<a name="line.795"></a>
-<span class="sourceLineNo">796</span><a name="line.796"></a>
-<span class="sourceLineNo">797</span>    // create/initialize the snapshot manager and other procedure managers<a name="line.797"></a>
-<span class="sourceLineNo">798</span>    this.snapshotManager = new SnapshotManager();<a name="line.798"></a>
-<span class="sourceLineNo">799</span>    this.mpmHost = new MasterProcedureManagerHost();<a name="line.799"></a>
-<span class="sourceLineNo">800</span>    this.mpmHost.register(this.snapshotManager);<a name="line.800"></a>
-<span class="sourceLineNo">801</span>    this.mpmHost.register(new MasterFlushTableProcedureManager());<a name="line.801"></a>
-<span class="sourceLineNo">802</span>    this.mpmHost.loadProcedures(conf);<a name="line.802"></a>
-<span class="sourceLineNo">803</span>    this.mpmHost.initialize(this, this.metricsMaster);<a name="line.803"></a>
-<span class="sourceLineNo">804</span>  }<a name="line.804"></a>
-<span class="sourceLineNo">805</span><a name="line.805"></a>
-<span class="sourceLineNo">806</span>  private static final ImmutableSet&lt;Class&lt;?&gt;&gt; UNSUPPORTED_PROCEDURES =<a name="line.806"></a>
-<span class="sourceLineNo">807</span>    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,<a name="line.807"></a>
-<span class="sourceLineNo">808</span>      MoveRegionProcedure.class);<a name="line.808"></a>
-<span class="sourceLineNo">809</span><a name="line.809"></a>
-<span class="sourceLineNo">810</span>  /**<a name="line.810"></a>
-<span class="sourceLineNo">811</span>   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is<a name="line.811"></a>
-<span class="sourceLineNo">812</span>   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to<a name="line.812"></a>
-<span class="sourceLineNo">813</span>   * make sure that there are none these procedures when upgrading. If there are, the master will<a name="line.813"></a>
-<span class="sourceLineNo">814</span>   * quit, you need to go back to the old version to finish these procedures first before upgrading.<a name="line.814"></a>
-<span class="sourceLineNo">815</span>   */<a name="line.815"></a>
-<span class="sourceLineNo">816</span>  private void checkUnsupportedProcedure(<a name="line.816"></a>
-<span class="sourceLineNo">817</span>      Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType)<a name="line.817"></a>
-<span class="sourceLineNo">818</span>      throws HBaseIOException {<a name="line.818"></a>
-<span class="sourceLineNo">819</span>    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to<a name="line.819"></a>
-<span class="sourceLineNo">820</span>    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as<a name="line.820"></a>
-<span class="sourceLineNo">821</span>    // there will be conflict in the code for AM. We should finish all these procedures before<a name="line.821"></a>
-<span class="sourceLineNo">822</span>    // upgrading.<a name="line.822"></a>
-<span class="sourceLineNo">823</span>    for (Class&lt;?&gt; clazz : UNSUPPORTED_PROCEDURES) {<a name="line.823"></a>
-<span class="sourceLineNo">824</span>      List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt; procs = procsByType.get(clazz);<a name="line.824"></a>
-<span class="sourceLineNo">825</span>      if (procs != null) {<a name="line.825"></a>
-<span class="sourceLineNo">826</span>        LOG.error(<a name="line.826"></a>
-<span class="sourceLineNo">827</span>          "Unsupported procedure type {} found, please rollback your master to the old" +<a name="line.827"></a>
-<span class="sourceLineNo">828</span>            " version to finish them, and then try to upgrade again. The full procedure list: {}",<a name="line.828"></a>
-<span class="sourceLineNo">829</span>          clazz, procs);<a name="line.829"></a>
-<span class="sourceLineNo">830</span>        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");<a name="line.830"></a>
-<span class="sourceLineNo">831</span>      }<a name="line.831"></a>
-<span class="sourceLineNo">832</span>    }<a name="line.832"></a>
-<span class="sourceLineNo">833</span>    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to<a name="line.833"></a>
-<span class="sourceLineNo">834</span>    // make sure that no one will try to schedule it but SCP does have a state which will schedule<a name="line.834"></a>
-<span class="sourceLineNo">835</span>    // it.<a name="line.835"></a>
-<span class="sourceLineNo">836</span>    if (procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.836"></a>
-<span class="sourceLineNo">837</span>      .map(p -&gt; (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)) {<a name="line.837"></a>
-<span class="sourceLineNo">838</span>      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure," +<a name="line.838"></a>
-<span class="sourceLineNo">839</span>        " which is not supported any more. Please rollback your master to the old version to" +<a name="line.839"></a>
-<span class="sourceLineNo">840</span>        " finish them, and then try to upgrade again.");<a name="line.840"></a>
-<span class="sourceLineNo">841</span>      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");<a name="line.841"></a>
-<span class="sourceLineNo">842</span>    }<a name="line.842"></a>
-<span class="sourceLineNo">843</span>  }<a name="line.843"></a>
-<span class="sourceLineNo">844</span><a name="line.844"></a>
-<span class="sourceLineNo">845</span>  /**<a name="line.845"></a>
-<span class="sourceLineNo">846</span>   * Finish initialization of HMaster after becoming the primary master.<a name="line.846"></a>
-<span class="sourceLineNo">847</span>   * &lt;p/&gt;<a name="line.847"></a>
-<span class="sourceLineNo">848</span>   * The startup order is a bit complicated but very important, do not change it unless you know<a name="line.848"></a>
-<span class="sourceLineNo">849</span>   * what you are doing.<a name="line.849"></a>
-<span class="sourceLineNo">850</span>   * &lt;ol&gt;<a name="line.850"></a>
-<span class="sourceLineNo">851</span>   * &lt;li&gt;Initialize file system based components - file system manager, wal manager, table<a name="line.851"></a>
-<span class="sourceLineNo">852</span>   * descriptors, etc&lt;/li&gt;<a name="line.852"></a>
-<span class="sourceLineNo">853</span>   * &lt;li&gt;Publish cluster id&lt;/li&gt;<a name="line.853"></a>
-<span class="sourceLineNo">854</span>   * &lt;li&gt;Here comes the most complicated part - initialize server manager, assignment manager and<a name="line.854"></a>
-<span class="sourceLineNo">855</span>   * region server tracker<a name="line.855"></a>
-<span class="sourceLineNo">856</span>   * &lt;ol type='i'&gt;<a name="line.856"></a>
-<span class="sourceLineNo">857</span>   * &lt;li&gt;Create server manager&lt;/li&gt;<a name="line.857"></a>
-<span class="sourceLineNo">858</span>   * &lt;li&gt;Create procedure executor, load the procedures, but do not start workers. We will start it<a name="line.858"></a>
-<span class="sourceLineNo">859</span>   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same<a name="line.859"></a>
-<span class="sourceLineNo">860</span>   * server&lt;/li&gt;<a name="line.860"></a>
-<span class="sourceLineNo">861</span>   * &lt;li&gt;Create assignment manager and start it, load the meta region state, but do not load data<a name="line.861"></a>
-<span class="sourceLineNo">862</span>   * from meta region&lt;/li&gt;<a name="line.862"></a>
-<span class="sourceLineNo">863</span>   * &lt;li&gt;Start region server tracker, construct the online servers set and find out dead servers and<a name="line.863"></a>
-<span class="sourceLineNo">864</span>   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also<a name="line.864"></a>
-<span class="sourceLineNo">865</span>   * scan the wal directory to find out possible live region servers, and the differences between<a name="line.865"></a>
-<span class="sourceLineNo">866</span>   * these two sets are the dead servers&lt;/li&gt;<a name="line.866"></a>
-<span class="sourceLineNo">867</span>   * &lt;/ol&gt;<a name="line.867"></a>
-<span class="sourceLineNo">868</span>   * &lt;/li&gt;<a name="line.868"></a>
-<span class="sourceLineNo">869</span>   * &lt;li&gt;If this is a new deploy, schedule a InitMetaProcedure to initialize meta&lt;/li&gt;<a name="line.869"></a>
-<span class="sourceLineNo">870</span>   * &lt;li&gt;Start necessary service threads - balancer, catalog janior, executor services, and also the<a name="line.870"></a>
-<span class="sourceLineNo">871</span>   * procedure executor, etc. Notice that the balancer must be created first as assignment manager<a name="line.871"></a>
-<span class="sourceLineNo">872</span>   * may use it when assigning regions.&lt;/li&gt;<a name="line.872"></a>
-<span class="sourceLineNo">873</span>   * &lt;li&gt;Wait for meta to be initialized if necesssary, start table state manager.&lt;/li&gt;<a name="line.873"></a>
-<span class="sourceLineNo">874</span>   * &lt;li&gt;Wait for enough region servers to check-in&lt;/li&gt;<a name="line.874"></a>
-<span class="sourceLineNo">875</span>   * &lt;li&gt;Let assignment manager load data from meta and construct region states&lt;/li&gt;<a name="line.875"></a>
-<span class="sourceLineNo">876</span>   * &lt;li&gt;Start all other things such as chore services, etc&lt;/li&gt;<a name="line.876"></a>
-<span class="sourceLineNo">877</span>   * &lt;/ol&gt;<a name="line.877"></a>
-<span class="sourceLineNo">878</span>   * &lt;p/&gt;<a name="line.878"></a>
-<span class="sourceLineNo">879</span>   * Notice that now we will not schedule a special procedure to make meta online(unless the first<a name="line.879"></a>
-<span class="sourceLineNo">880</span>   * time where meta has not been created yet), we will rely on SCP to bring meta online.<a name="line.880"></a>
-<span class="sourceLineNo">881</span>   */<a name="line.881"></a>
-<span class="sourceLineNo">882</span>  private void finishActiveMasterInitialization(MonitoredTask status) throws IOException,<a name="line.882"></a>
-<span class="sourceLineNo">883</span>          InterruptedException, KeeperException, ReplicationException {<a name="line.883"></a>
-<span class="sourceLineNo">884</span>    Thread zombieDetector = new Thread(new InitializationMonitor(this),<a name="line.884"></a>
-<span class="sourceLineNo">885</span>        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());<a name="line.885"></a>
-<span class="sourceLineNo">886</span>    zombieDetector.setDaemon(true);<a name="line.886"></a>
-<span class="sourceLineNo">887</span>    zombieDetector.start();<a name="line.887"></a>
-<span class="sourceLineNo">888</span><a name="line.888"></a>
-<span class="sourceLineNo">889</span>    /*<a name="line.889"></a>
-<span class="sourceLineNo">890</span>     * We are active master now... go initialize components we need to run.<a name="line.890"></a>
-<span class="sourceLineNo">891</span>     */<a name="line.891"></a>
-<span class="sourceLineNo">892</span>    status.setStatus("Initializing Master file system");<a name="line.892"></a>
-<span class="sourceLineNo">893</span><a name="line.893"></a>
-<span class="sourceLineNo">894</span>    this.masterActiveTime = System.currentTimeMillis();<a name="line.894"></a>
-<span class="sourceLineNo">895</span>    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.<a name="line.895"></a>
-<span class="sourceLineNo">896</span>    // Initialize the chunkCreator<a name="line.896"></a>
-<span class="sourceLineNo">897</span>    initializeMemStoreChunkCreator();<a name="line.897"></a>
-<span class="sourceLineNo">898</span>    this.fileSystemManager = new MasterFileSystem(conf);<a name="line.898"></a>
-<span class="sourceLineNo">899</span>    this.walManager = new MasterWalManager(this);<a name="line.899"></a>
-<span class="sourceLineNo">900</span><a name="line.900"></a>
-<span class="sourceLineNo">901</span>    // enable table descriptors cache<a name="line.901"></a>
-<span class="sourceLineNo">902</span>    this.tableDescriptors.setCacheOn();<a name="line.902"></a>
-<span class="sourceLineNo">903</span><a name="line.903"></a>
-<span class="sourceLineNo">904</span>    // warm-up HTDs cache on master initialization<a name="line.904"></a>
-<span class="sourceLineNo">905</span>    if (preLoadTableDescriptors) {<a name="line.905"></a>
-<span class="sourceLineNo">906</span>      status.setStatus("Pre-loading table descriptors");<a name="line.906"></a>
-<span class="sourceLineNo">907</span>      this.tableDescriptors.getAll();<a name="line.907"></a>
-<span class="sourceLineNo">908</span>    }<a name="line.908"></a>
-<span class="sourceLineNo">909</span><a name="line.909"></a>
-<span class="sourceLineNo">910</span>    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but<a name="line.910"></a>
-<span class="sourceLineNo">911</span>    // only after it has checked in with the Master. At least a few tests ask Master for clusterId<a name="line.911"></a>
-<span class="sourceLineNo">912</span>    // before it has called its run method and before RegionServer has done the reportForDuty.<a name="line.912"></a>
-<span class="sourceLineNo">913</span>    ClusterId clusterId = fileSystemManager.getClusterId();<a name="line.913"></a>
-<span class="sourceLineNo">914</span>    status.setStatus("Publishing Cluster ID " + clusterId + " in ZooKeeper");<a name="line.914"></a>
-<span class="sourceLineNo">915</span>    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());<a name="line.915"></a>
-<span class="sourceLineNo">916</span>    this.clusterId = clusterId.toString();<a name="line.916"></a>
-<span class="sourceLineNo">917</span><a name="line.917"></a>
-<span class="sourceLineNo">918</span>    status.setStatus("Initialze ServerManager and schedule SCP for crash servers");<a name="line.918"></a>
-<span class="sourceLineNo">919</span>    this.serverManager = createServerManager(this);<a name="line.919"></a>
-<span class="sourceLineNo">920</span>    createProcedureExecutor();<a name="line.920"></a>
-<span class="sourceLineNo">921</span>    @SuppressWarnings("rawtypes")<a name="line.921"></a>
-<span class="sourceLineNo">922</span>    Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType =<a name="line.922"></a>
-<span class="sourceLineNo">923</span>      procedureExecutor.getActiveProceduresNoCopy().stream()<a name="line.923"></a>
-<span class="sourceLineNo">924</span>        .collect(Collectors.groupingBy(p -&gt; p.getClass()));<a name="line.924"></a>
-<span class="sourceLineNo">925</span><a name="line.925"></a>
-<span class="sourceLineNo">926</span>    checkUnsupportedProcedure(procsByType);<a name="line.926"></a>
-<span class="sourceLineNo">927</span><a name="line.927"></a>
-<span class="sourceLineNo">928</span>    // Create Assignment Manager<a name="line.928"></a>
-<span class="sourceLineNo">929</span>    this.assignmentManager = new AssignmentManager(this);<a name="line.929"></a>
-<span class="sourceLineNo">930</span>    this.assignmentManager.start();<a name="line.930"></a>
-<span class="sourceLineNo">931</span>    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as<a name="line.931"></a>
-<span class="sourceLineNo">932</span>    // completed, it could still be in the procedure list. This is a bit strange but is another<a name="line.932"></a>
-<span class="sourceLineNo">933</span>    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.<a name="line.933"></a>
-<span class="sourceLineNo">934</span>    List&lt;TransitRegionStateProcedure&gt; ritList =<a name="line.934"></a>
-<span class="sourceLineNo">935</span>      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()<a name="line.935"></a>
-<span class="sourceLineNo">936</span>        .filter(p -&gt; !p.isFinished()).map(p -&gt; (TransitRegionStateProcedure) p)<a name="line.936"></a>
-<span class="sourceLineNo">937</span>        .collect(Collectors.toList());<a name="line.937"></a>
-<span class="sourceLineNo">938</span>    this.assignmentManager.setupRIT(ritList);<a name="line.938"></a>
-<span class="sourceLineNo">939</span><a name="line.939"></a>
-<span class="sourceLineNo">940</span>    this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);<a name="line.940"></a>
-<span class="sourceLineNo">941</span>    this.regionServerTracker.start(<a name="line.941"></a>
-<span class="sourceLineNo">942</span>      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.942"></a>
-<span class="sourceLineNo">943</span>        .map(p -&gt; (ServerCrashProcedure) p).map(p -&gt; p.getServerName()).collect(Collectors.toSet()),<a name="line.943"></a>
-<span class="sourceLineNo">944</span>      walManager.getLiveServersFromWALDir());<a name="line.944"></a>
-<span class="sourceLineNo">945</span>    // This manager will be started AFTER hbase:meta is confirmed on line.<a name="line.945"></a>
-<span class="sourceLineNo">946</span>    // hbase.mirror.table.state.to.zookeeper is so hbase1 clients can connect. They read table<a name="line.946"></a>
-<span class="sourceLineNo">947</span>    // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients.<a name="line.947"></a>
-<span class="sourceLineNo">948</span>    this.tableStateManager =<a name="line.948"></a>
-<span class="sourceLineNo">949</span>      this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true)<a name="line.949"></a>
-<span class="sourceLineNo">950</span>        ?<a name="line.950"></a>
-<span class="sourceLineNo">951</span>        new MirroringTableStateManager(this):<a name="line.951"></a>
-<span class="sourceLineNo">952</span>        new TableStateManager(this);<a name="line.952"></a>
-<span class="sourceLineNo">953</span><a name="line.953"></a>
-<span class="sourceLineNo">954</span>    status.setStatus("Initializing ZK system trackers");<a name="line.954"></a>
-<span class="sourceLineNo">955</span>    initializeZKBasedSystemTrackers();<a name="line.955"></a>
-<span class="sourceLineNo">956</span>    status.setStatus("Loading last flushed sequence id of regions");<a name="line.956"></a>
-<span class="sourceLineNo">957</span>    try {<a name="line.957"></a>
-<span class="sourceLineNo">958</span>      this.serverManager.loadLastFlushedSequenceIds();<a name="line.958"></a>
-<span class="sourceLineNo">959</span>    } catch (IOException e) {<a name="line.959"></a>
-<span class="sourceLineNo">960</span>      LOG.debug("Failed to load last flushed sequence id of regions"<a name="line.960"></a>
-<span class="sourceLineNo">961</span>          + " from file system", e);<a name="line.961"></a>
-<span class="sourceLineNo">962</span>    }<a name="line.962"></a>
-<span class="sourceLineNo">963</span>    // Set ourselves as active Master now our claim has succeeded up in zk.<a name="line.963"></a>
-<span class="sourceLineNo">964</span>    this.activeMaster = true;<a name="line.964"></a>
-<span class="sourceLineNo">965</span><a name="line.965"></a>
-<span class="sourceLineNo">966</span>    // This is for backwards compatibility<a name="line.966"></a>
-<span class="sourceLineNo">967</span>    // See HBASE-11393<a name="line.967"></a>
-<span class="sourceLineNo">968</span>    status.setStatus("Update TableCFs node in ZNode");<a name="line.968"></a>
-<span class="sourceLineNo">969</span>    ReplicationPeerConfigUpgrader tableCFsUpdater =<a name="line.969"></a>
-<span class="sourceLineNo">970</span>        new ReplicationPeerConfigUpgrader(zooKeeper, conf);<a name="line.970"></a>
-<span class="sourceLineNo">971</span>    tableCFsUpdater.copyTableCFs();<a name="line.971"></a>
-<span class="sourceLineNo">972</span><a name="line.972"></a>
-<span class="sourceLineNo">973</span>    // Add the Observer to delete quotas on table deletion before starting all CPs by<a name="line.973"></a>
-<span class="sourceLineNo">974</span>    // default with quota support, avoiding if user specifically asks to not load this Observer.<a name="line.974"></a>
-<span class="sourceLineNo">975</span>    if (QuotaUtil.isQuotaEnabled(conf)) {<a name="line.975"></a>
-<span class="sourceLineNo">976</span>      updateConfigurationForQuotasObserver(conf);<a name="line.976"></a>
-<span class="sourceLineNo">977</span>    }<a name="line.977"></a>
-<span class="sourceLineNo">978</span>    // initialize master side coprocessors before we start handling requests<a name="line.978"></a>
-<span class="sourceLineNo">979</span>    status.setStatus("Initializing master coprocessors");<a name="line.979"></a>
-<span class="sourceLineNo">980</span>    this.cpHost = new MasterCoprocessorHost(this, this.conf);<a name="line.980"></a>
+<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HBaseFsck;<a name="line.196"></a>
+<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.197"></a>
+<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.198"></a>
+<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.199"></a>
+<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.200"></a>
+<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.201"></a>
+<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.203"></a>
+<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.204"></a>
+<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.207"></a>
+<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.211"></a>
+<span class="sourceLineNo">212</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.212"></a>
+<span class="sourceLineNo">213</span>import org.apache.zookeeper.KeeperException;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.Server;<a name="line.214"></a>
+<span class="sourceLineNo">215</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.215"></a>
+<span class="sourceLineNo">216</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.216"></a>
+<span class="sourceLineNo">217</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>import org.slf4j.Logger;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>import org.slf4j.LoggerFactory;<a name="line.219"></a>
+<span class="sourceLineNo">220</span><a name="line.220"></a>
+<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.224"></a>
+<span class="sourceLineNo">225</span><a name="line.225"></a>
+<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.226"></a>
+<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.227"></a>
+<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.228"></a>
+<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>/**<a name="line.233"></a>
+<span class="sourceLineNo">234</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.234"></a>
+<span class="sourceLineNo">235</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.235"></a>
+<span class="sourceLineNo">236</span> * run the cluster.  All others park themselves in their constructor until<a name="line.236"></a>
+<span class="sourceLineNo">237</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.237"></a>
+<span class="sourceLineNo">238</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.238"></a>
+<span class="sourceLineNo">239</span> *<a name="line.239"></a>
+<span class="sourceLineNo">240</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.240"></a>
+<span class="sourceLineNo">241</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.241"></a>
+<span class="sourceLineNo">242</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.242"></a>
+<span class="sourceLineNo">243</span> *<a name="line.243"></a>
+<span class="sourceLineNo">244</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="l

<TRUNCATED>
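
The removed HMaster startup lines above derive, before starting the RegionServerTracker, the set of servers that already have a ServerCrashProcedure queued, so the tracker does not schedule duplicate crash handling for them. A minimal, self-contained sketch of that stream pattern follows; Procedure, ServerCrashProcedure and the sample server-name strings here are simplified placeholders, not the real org.apache.hadoop.hbase classes.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    // Simplified stand-ins for the HBase types referenced in the removed snippet.
    class Procedure { }

    class ServerCrashProcedure extends Procedure {
      private final String serverName;
      ServerCrashProcedure(String serverName) { this.serverName = serverName; }
      String getServerName() { return serverName; }
    }

    public class PendingCrashServersSketch {
      public static void main(String[] args) {
        // Procedures recovered from the procedure store, grouped by class,
        // mirroring the shape of the procsByType map used at master startup.
        Map<Class<?>, List<Procedure>> procsByType = new HashMap<>();
        procsByType.put(ServerCrashProcedure.class, Arrays.<Procedure>asList(
            new ServerCrashProcedure("rs1,16020,1534000000000"),
            new ServerCrashProcedure("rs2,16020,1534000000000")));

        // Same pattern as the removed lines: collect the names of servers that
        // already have a crash procedure pending.
        Set<String> serversWithPendingCrashProc =
            procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList())
                .stream()
                .map(p -> (ServerCrashProcedure) p)
                .map(ServerCrashProcedure::getServerName)
                .collect(Collectors.toSet());

        // Prints the two sample server names.
        System.out.println(serversWithPendingCrashProc);
      }
    }
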

[23/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
index 5c4ac87..207ebcc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
@@ -204,7 +204,7 @@
 <span class="sourceLineNo">196</span>        .setType(Cell.Type.Put)<a name="line.196"></a>
 <span class="sourceLineNo">197</span>        .setValue(Bytes.toBytes(state.name()))<a name="line.197"></a>
 <span class="sourceLineNo">198</span>        .build());<a name="line.198"></a>
-<span class="sourceLineNo">199</span>    LOG.info(info.toString(), new Exception());<a name="line.199"></a>
+<span class="sourceLineNo">199</span>    LOG.info(info.toString());<a name="line.199"></a>
 <span class="sourceLineNo">200</span>    updateRegionLocation(regionInfo, state, put);<a name="line.200"></a>
 <span class="sourceLineNo">201</span>  }<a name="line.201"></a>
 <span class="sourceLineNo">202</span><a name="line.202"></a>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
index 5c4ac87..207ebcc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
@@ -204,7 +204,7 @@
 <span class="sourceLineNo">196</span>        .setType(Cell.Type.Put)<a name="line.196"></a>
 <span class="sourceLineNo">197</span>        .setValue(Bytes.toBytes(state.name()))<a name="line.197"></a>
 <span class="sourceLineNo">198</span>        .build());<a name="line.198"></a>
-<span class="sourceLineNo">199</span>    LOG.info(info.toString(), new Exception());<a name="line.199"></a>
+<span class="sourceLineNo">199</span>    LOG.info(info.toString());<a name="line.199"></a>
 <span class="sourceLineNo">200</span>    updateRegionLocation(regionInfo, state, put);<a name="line.200"></a>
 <span class="sourceLineNo">201</span>  }<a name="line.201"></a>
 <span class="sourceLineNo">202</span><a name="line.202"></a>


[17/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      Pair&lt;Path, FSDataOu

<TRUNCATED>
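
The hunk above restructures hbck's exclusive-lock handling (FileLockCallable, createFileWithRetries, checkAndMarkRunningHbck, unlockHbck): the tool marks itself as running by creating a lock file that must not already exist, retries transient failures a bounded number of times, and deletes the file again on exit. A minimal sketch of that pattern, written against java.nio on a local filesystem rather than the HDFS API used by HBaseFsck, and with illustrative names only, could look like this:

    import java.io.IOException;
    import java.nio.file.FileAlreadyExistsException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    // Sketch of an exclusive lock file with bounded retries (hypothetical names,
    // local filesystem only; not the HDFS-backed implementation in HBaseFsck).
    public final class LockFileSketch {

      /** Returns the lock path on success, or null if another instance holds the lock. */
      static Path tryAcquireLock(Path dir, String fileName, int maxAttempts, long sleepMillis)
          throws IOException, InterruptedException {
        Files.createDirectories(dir);
        Path lock = dir.resolve(fileName);
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            // CREATE_NEW fails atomically if the file already exists.
            Files.write(lock, "lock-owner".getBytes(), StandardOpenOption.CREATE_NEW);
            return lock;
          } catch (FileAlreadyExistsException e) {
            return null;                 // another instance already holds the lock
          } catch (IOException ioe) {
            if (attempt == maxAttempts) {
              throw ioe;                 // out of retries, surface the failure
            }
            Thread.sleep(sleepMillis);   // back off before the next attempt
          }
        }
        return null;
      }

      /** Mirrors unlockHbck(): best-effort removal of the lock file on exit. */
      static void releaseLock(Path lock) throws IOException {
        Files.deleteIfExists(lock);
      }
    }

In the real tool the acquire/release pair is additionally wrapped in a shutdown hook (see connect() in the diff above) so that an interrupted run still frees the lock for later invocations.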

[14/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      Pair&lt

<TRUNCATED>
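
The hunk above (HBaseFsck as rendered on the site) implements the hbck lock file: FileLockCallable creates "hbase-hbck.lock" under the HBase temp dir with a RetryCounter-driven backoff, and checkAndMarkRunningHbck runs that callable on a one-thread executor bounded by hbase.hbck.lockfile.maxwaittime. Below is a minimal standalone sketch of the same retry-plus-timeout pattern; it uses plain java.nio rather than HBase's FSUtils/RetryCounter, and every class name, method name, and default in it is illustrative only, not an HBase API.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Simplified stand-in for the hbck lock-file logic shown above: try to create an
 * exclusive lock file, retrying with a bounded sleep, and cap the whole attempt
 * with a wall-clock timeout enforced by a single-thread executor (mirroring
 * checkAndMarkRunningHbck). Names and defaults here are illustrative only.
 */
public class LockFileSketch {

  /** Attempt to create the lock file, sleeping between attempts (cf. createFileWithRetries). */
  static Path createWithRetries(Path lockPath, int maxAttempts, long sleepMillis)
      throws IOException, InterruptedException {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        Files.createDirectories(lockPath.getParent());
        // CREATE_NEW fails if the file already exists, i.e. another process holds the lock.
        Files.write(lockPath, "written by lock-file sketch".getBytes(StandardCharsets.UTF_8),
            StandardOpenOption.CREATE_NEW);
        return lockPath;
      } catch (FileAlreadyExistsException e) {
        // Mirrors the AlreadyBeingCreatedException branch: someone else owns the lock.
        return null;
      } catch (IOException ioe) {
        last = ioe;
        Thread.sleep(sleepMillis); // fixed sleep; the real code uses a RetryCounter with backoff
      }
    }
    throw last;
  }

  /** Bound the whole acquisition by a timeout, as checkAndMarkRunningHbck does. */
  static Path acquireWithTimeout(Path lockPath, long timeoutSeconds) {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    FutureTask<Path> task = new FutureTask<>(() -> createWithRetries(lockPath, 5, 200));
    executor.execute(task);
    try {
      return task.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt(); // preserve interrupt status, as the real code does
      return null;
    } catch (ExecutionException | TimeoutException e) {
      task.cancel(true);
      return null;
    } finally {
      executor.shutdownNow();
    }
  }

  public static void main(String[] args) {
    Path lock = acquireWithTimeout(Paths.get("/tmp/hbase-hbck-sketch/hbase-hbck.lock"), 30);
    System.out.println(lock == null ? "lock not acquired" : "lock held at " + lock);
  }
}

A caller that gets null back either lost the race to another lock holder or ran out of time; the real tool distinguishes the two by logging the timeout case before cancelling the task.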

[35/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/checkstyle.rss
----------------------------------------------------------------------
diff --git a/checkstyle.rss b/checkstyle.rss
index 3604af9..10a123a 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
     <copyright>&#169;2007 - 2018 The Apache Software Foundation</copyright>
     <item>
       <title>File: 3714,
-             Errors: 15386,
+             Errors: 15385,
              Warnings: 0,
              Infos: 0
       </title>
@@ -9547,7 +9547,7 @@ under the License.
                   0
                 </td>
                 <td>
-                  1
+                  0
                 </td>
               </tr>
                           <tr>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/coc.html
----------------------------------------------------------------------
diff --git a/coc.html b/coc.html
index e294239..09ad52e 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Code of Conduct Policy
@@ -375,7 +375,7 @@ email to <a class="externalLink" href="mailto:private@hbase.apache.org">the priv
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/dependencies.html
----------------------------------------------------------------------
diff --git a/dependencies.html b/dependencies.html
index 89c744b..1c2f0bd 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Dependencies</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -440,7 +440,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/dependency-convergence.html
----------------------------------------------------------------------
diff --git a/dependency-convergence.html b/dependency-convergence.html
index bb4d10e..48628dc 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Reactor Dependency Convergence</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -890,7 +890,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/dependency-info.html
----------------------------------------------------------------------
diff --git a/dependency-info.html b/dependency-info.html
index 334c8d2..58221d4 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Dependency Information</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -313,7 +313,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/dependency-management.html
----------------------------------------------------------------------
diff --git a/dependency-management.html b/dependency-management.html
index 3302203..8fc28cb 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Dependency Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -1005,7 +1005,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/allclasses-frame.html
----------------------------------------------------------------------
diff --git a/devapidocs/allclasses-frame.html b/devapidocs/allclasses-frame.html
index 0201c83..a86faf1 100644
--- a/devapidocs/allclasses-frame.html
+++ b/devapidocs/allclasses-frame.html
@@ -1038,6 +1038,7 @@
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util" target="classFrame"><span class="interfaceName">HBaseFsck.ErrorReporter</span></a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.ErrorReporter.ERROR_CODE</a></li>
+<li><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.FileLockCallable</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.HBaseFsckTool</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.HbckInfo</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.HdfsEntry</a></li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/allclasses-noframe.html
----------------------------------------------------------------------
diff --git a/devapidocs/allclasses-noframe.html b/devapidocs/allclasses-noframe.html
index cb9c82a..c265cbb 100644
--- a/devapidocs/allclasses-noframe.html
+++ b/devapidocs/allclasses-noframe.html
@@ -1038,6 +1038,7 @@
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util"><span class="interfaceName">HBaseFsck.ErrorReporter</span></a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a></li>
+<li><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HBaseFsckTool</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a></li>
 <li><a href="org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HdfsEntry</a></li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/constant-values.html
----------------------------------------------------------------------
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index c99b5c4..beda182 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3817,21 +3817,21 @@
 <!--   -->
 </a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/Version.html#date">date</a></code></td>
-<td class="colLast"><code>"Fri Aug 24 14:38:46 UTC 2018"</code></td>
+<td class="colLast"><code>"Sat Aug 25 14:39:06 UTC 2018"</code></td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.Version.revision">
 <!--   -->
 </a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/Version.html#revision">revision</a></code></td>
-<td class="colLast"><code>"a452487a9b82bfd33bc10683c3f8b8ae74d58883"</code></td>
+<td class="colLast"><code>"409e742ac3bdbff027b136a87339f4f5511da07d"</code></td>
 </tr>
 <tr class="altColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.Version.srcChecksum">
 <!--   -->
 </a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/Version.html#srcChecksum">srcChecksum</a></code></td>
-<td class="colLast"><code>"6a771691f343c60ea56a144f9db58ab5"</code></td>
+<td class="colLast"><code>"574f44946cbfd790ca8dd655ada11008"</code></td>
 </tr>
 <tr class="rowColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.Version.url">
@@ -27969,7 +27969,7 @@
 <tr class="rowColor">
 <td class="colFirst"><a name="org.apache.hadoop.hbase.util.HBaseFsck.HBCK_LOCK_FILE">
 <!--   -->
-</a><code>private&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
+</a><code>public&nbsp;static&nbsp;final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
 <td><code><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#HBCK_LOCK_FILE">HBCK_LOCK_FILE</a></code></td>
 <td class="colLast"><code>"hbase-hbck.lock"</code></td>
 </tr>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/index-all.html
----------------------------------------------------------------------
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 0797571..44295e6 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -10090,7 +10090,7 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html#checkAndLoadInstance-java.lang.Class-int-org.apache.hadoop.conf.Configuration-">checkAndLoadInstance(Class&lt;?&gt;, int, Configuration)</a></span> - Method in class org.apache.hadoop.hbase.coprocessor.<a href="org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html" title="class in org.apache.hadoop.hbase.coprocessor">CoprocessorHost</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#checkAndMarkRunningHbck--">checkAndMarkRunningHbck()</a></span> - Method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#checkAndMarkRunningHbck-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">checkAndMarkRunningHbck(Configuration, RetryCounter)</a></span> - Static method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
 <dd>
 <div class="block">This method maintains a lock using a file.</div>
 </dd>
@@ -16438,6 +16438,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/ConnectionCache.html#conf">conf</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/ConnectionCache.html" title="class in org.apache.hadoop.hbase.util">ConnectionCache</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#conf">conf</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#conf">conf</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo.HDFSIntegrityFixer</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html#conf">conf</a></span> - Variable in class org.apache.hadoop.hbase.util.hbck.<a href="org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a></dt>
@@ -20085,6 +20087,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/procedure/SchemaLocking.html#createLockedResource-org.apache.hadoop.hbase.procedure2.LockedResourceType-java.lang.String-org.apache.hadoop.hbase.procedure2.LockAndQueue-">createLockedResource(LockedResourceType, String, LockAndQueue)</a></span> - Method in class org.apache.hadoop.hbase.master.procedure.<a href="org/apache/hadoop/hbase/master/procedure/SchemaLocking.html" title="class in org.apache.hadoop.hbase.master.procedure">SchemaLocking</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#createLockRetryCounterFactory-org.apache.hadoop.conf.Configuration-">createLockRetryCounterFactory(Configuration)</a></span> - Static method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html#createLog-int-int-int-org.apache.hadoop.hbase.ServerName-java.lang.Throwable-long-boolean-java.lang.String-int-int-">createLog(int, int, int, ServerName, Throwable, long, boolean, String, int, int)</a></span> - Method in class org.apache.hadoop.hbase.client.<a href="org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html" title="class in org.apache.hadoop.hbase.client">AsyncRequestFutureImpl</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/locking/LockManager.html#createMasterLock-java.lang.String-org.apache.hadoop.hbase.procedure2.LockType-java.lang.String-">createMasterLock(String, LockType, String)</a></span> - Method in class org.apache.hadoop.hbase.master.locking.<a href="org/apache/hadoop/hbase/master/locking/LockManager.html" title="class in org.apache.hadoop.hbase.master.locking">LockManager</a></dt>
@@ -21294,6 +21298,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#createZNodeRetryCounterFactory">createZNodeRetryCounterFactory</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#createZnodeRetryCounterFactory-org.apache.hadoop.conf.Configuration-">createZnodeRetryCounterFactory(Configuration)</a></span> - Static method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#createZooKeeperWatcher--">createZooKeeperWatcher()</a></span> - Method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/client/SnapshotDescription.html#creationTime">creationTime</a></span> - Variable in class org.apache.hadoop.hbase.client.<a href="org/apache/hadoop/hbase/client/SnapshotDescription.html" title="class in org.apache.hadoop.hbase.client">SnapshotDescription</a></dt>
@@ -30974,7 +30980,7 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/io/FileLink.FileLinkInputStream.html#FileLinkInputStream-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.hbase.io.FileLink-int-">FileLinkInputStream(FileSystem, FileLink, int)</a></span> - Constructor for class org.apache.hadoop.hbase.io.<a href="org/apache/hadoop/hbase/io/FileLink.FileLinkInputStream.html" title="class in org.apache.hadoop.hbase.io">FileLink.FileLinkInputStream</a></dt>
 <dd>&nbsp;</dd>
-<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#FileLockCallable-org.apache.hadoop.hbase.util.RetryCounter-">FileLockCallable(RetryCounter)</a></span> - Constructor for class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></dt>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#FileLockCallable-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">FileLockCallable(Configuration, RetryCounter)</a></span> - Constructor for class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></dt>
 <dd>&nbsp;</dd>
 <dt><a href="org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.html" title="class in org.apache.hadoop.hbase.io.hfile.bucket"><span class="typeNameLink">FileMmapEngine</span></a> - Class in <a href="org/apache/hadoop/hbase/io/hfile/bucket/package-summary.html">org.apache.hadoop.hbase.io.hfile.bucket</a></dt>
 <dd>
@@ -40083,6 +40089,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/CoprocessorEnvironment.html#getHBaseVersion--">getHBaseVersion()</a></span> - Method in interface org.apache.hadoop.hbase.<a href="org/apache/hadoop/hbase/CoprocessorEnvironment.html" title="interface in org.apache.hadoop.hbase">CoprocessorEnvironment</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#getHbckLockPath--">getHbckLockPath()</a></span> - Method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/HStoreFile.html#getHDFSBlockDistribution--">getHDFSBlockDistribution()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/HStoreFile.html" title="class in org.apache.hadoop.hbase.regionserver">HStoreFile</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/regionserver/StoreFileInfo.html#getHDFSBlockDistribution--">getHDFSBlockDistribution()</a></span> - Method in class org.apache.hadoop.hbase.regionserver.<a href="org/apache/hadoop/hbase/regionserver/StoreFileInfo.html" title="class in org.apache.hadoop.hbase.regionserver">StoreFileInfo</a></dt>
@@ -52819,6 +52827,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html#getTIOError-java.io.IOException-">getTIOError(IOException)</a></span> - Method in class org.apache.hadoop.hbase.thrift2.<a href="org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html" title="class in org.apache.hadoop.hbase.thrift2">ThriftHBaseServiceHandler</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#getTmpDir-org.apache.hadoop.conf.Configuration-">getTmpDir(Configuration)</a></span> - Static method in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/wal/WALSplitter.html#getTmpRecoveredEditsFileName-java.lang.String-">getTmpRecoveredEditsFileName(String)</a></span> - Static method in class org.apache.hadoop.hbase.wal.<a href="org/apache/hadoop/hbase/wal/WALSplitter.html" title="class in org.apache.hadoop.hbase.wal">WALSplitter</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/thrift2/ThriftServer.html#getTNonBlockingServer-org.apache.thrift.protocol.TProtocolFactory-org.apache.thrift.TProcessor-org.apache.thrift.transport.TTransportFactory-java.net.InetSocketAddress-">getTNonBlockingServer(TProtocolFactory, TProcessor, TTransportFactory, InetSocketAddress)</a></span> - Static method in class org.apache.hadoop.hbase.thrift2.<a href="org/apache/hadoop/hbase/thrift2/ThriftServer.html" title="class in org.apache.hadoop.hbase.thrift2">ThriftServer</a></dt>
@@ -56920,7 +56930,9 @@
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#hbck">hbck</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.WorkItemRegion</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#HBCK_LOCK_FILE">HBCK_LOCK_FILE</a></span> - Static variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
-<dd>&nbsp;</dd>
+<dd>
+<div class="block">Here is where hbase-1.x used to default the lock for hbck1.</div>
+</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#HBCK_LOCK_PATH">HBCK_LOCK_PATH</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/HConstants.html#HBCK_SIDELINEDIR_NAME">HBCK_SIDELINEDIR_NAME</a></span> - Static variable in class org.apache.hadoop.hbase.<a href="org/apache/hadoop/hbase/HConstants.html" title="class in org.apache.hadoop.hbase">HConstants</a></dt>
@@ -56933,6 +56945,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#hbckLockCleanup">hbckLockCleanup</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#hbckLockPath">hbckLockPath</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></dt>
+<dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#hbckOutFd">hbckOutFd</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/util/HBaseFsck.html#hbckZodeCreated">hbckZodeCreated</a></span> - Variable in class org.apache.hadoop.hbase.util.<a href="org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index 44aeebe..a6b88dd 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -167,10 +167,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupRestoreConstants.BackupCommand</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupInfo.BackupPhase.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupInfo.BackupPhase</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupInfo.BackupState.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupInfo.BackupState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupRestoreConstants.BackupCommand</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupType.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.backup.<a href="../../../../../org/apache/hadoop/hbase/backup/BackupInfo.BackupState.html" title="enum in org.apache.hadoop.hbase.backup"><span class="typeNameLink">BackupInfo.BackupState</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 0aeed8f..a1f2e6d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -552,24 +552,24 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Durability.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Durability</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">MobCompactPartitionPolicy</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/RegionLocateType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">RegionLocateType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/ScannerCallable.MoreResults.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">ScannerCallable.MoreResults</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/SnapshotType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">SnapshotType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Scan.ReadType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Scan.ReadType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncProcessTask.SubmittedRows.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncProcessTask.SubmittedRows</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/RequestController.ReturnCode.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">RequestController.ReturnCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/TableState.State.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">TableState.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactionState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactionState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Scan.ReadType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Scan.ReadType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/MasterSwitchType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">MasterSwitchType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/IsolationLevel.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">IsolationLevel</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/ScannerCallable.MoreResults.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">ScannerCallable.MoreResults</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/TableState.State.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">TableState.State</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/RequestController.ReturnCode.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">RequestController.ReturnCode</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncProcessTask.SubmittedRows.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncProcessTask.SubmittedRows</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Durability.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Durability</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AbstractResponse.ResponseType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AbstractResponse.ResponseType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/Consistency.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">Consistency</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/CompactType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">CompactType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AbstractResponse.ResponseType.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AbstractResponse.ResponseType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">AsyncRequestFutureImpl.Retry</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">MobCompactPartitionPolicy</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.client.<a href="../../../../../org/apache/hadoop/hbase/client/IsolationLevel.html" title="enum in org.apache.hadoop.hbase.client"><span class="typeNameLink">IsolationLevel</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
index 329f62f..e67b778 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
@@ -201,8 +201,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.coprocessor.<a href="../../../../../org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html" title="enum in org.apache.hadoop.hbase.coprocessor"><span class="typeNameLink">RegionObserver.MutationType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.coprocessor.<a href="../../../../../org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.MetaTableOps.html" title="enum in org.apache.hadoop.hbase.coprocessor"><span class="typeNameLink">MetaTableMetrics.MetaTableOps</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.coprocessor.<a href="../../../../../org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html" title="enum in org.apache.hadoop.hbase.coprocessor"><span class="typeNameLink">RegionObserver.MutationType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 3039625..48c695d 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -184,13 +184,13 @@
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
 <li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">RegexStringComparator.EngineType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.SatisfiesCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.SatisfiesCode</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">CompareFilter.CompareOp</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/Filter.ReturnCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">Filter.ReturnCode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.Order</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterList.Operator.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterList.Operator</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FuzzyRowFilter.SatisfiesCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FuzzyRowFilter.SatisfiesCode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterWrapper.FilterRowRetCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterWrapper.FilterRowRetCode</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/BitComparator.BitwiseOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">BitComparator.BitwiseOp</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/Filter.ReturnCode.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">Filter.ReturnCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">CompareFilter.CompareOp</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.filter.<a href="../../../../../org/apache/hadoop/hbase/filter/FilterList.Operator.html" title="enum in org.apache.hadoop.hbase.filter"><span class="typeNameLink">FilterList.Operator</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 54d332f..d10ee82 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -274,11 +274,11 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/Cacheable.MemoryType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">Cacheable.MemoryType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">HFileBlock.Writer.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockType.BlockCategory.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockType.BlockCategory</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/Cacheable.MemoryType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">Cacheable.MemoryType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">CacheConfig.ExternalBlockCaches</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockType.BlockCategory.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockType.BlockCategory</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">HFileBlock.Writer.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/BlockPriority.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">BlockPriority</span></a></li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index 91a2ed4..df0adf0 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -353,9 +353,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/CallEvent.Type.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">CallEvent.Type</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.BufferCallAction.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">BufferCallBeforeInitHandler.BufferCallAction</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.SourceStorage.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">MetricsHBaseServerSourceFactoryImpl.SourceStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.BufferCallAction.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">BufferCallBeforeInitHandler.BufferCallAction</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.ipc.<a href="../../../../../org/apache/hadoop/hbase/ipc/CallEvent.Type.html" title="enum in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">CallEvent.Type</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
index dfed8e7..a07a1d8 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
@@ -293,10 +293,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.Counters.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">RowCounter.RowCounterMapper.Counters</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/TableSplit.Version.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">TableSplit.Version</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">CellCounter.CellCounterMapper.Counters</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/TableSplit.Version.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">TableSplit.Version</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/SyncTable.SyncMapper.Counter.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">SyncTable.SyncMapper.Counter</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.mapreduce.<a href="../../../../../org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.Counters.html" title="enum in org.apache.hadoop.hbase.mapreduce"><span class="typeNameLink">RowCounter.RowCounterMapper.Counters</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index e2fd7c1..2f343a8 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.256">HMaster.InitializationMonitor</a>
+<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.257">HMaster.InitializationMonitor</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" title="class in org.apache.hadoop.hbase.util">HasThread</a></pre>
 <div class="block">Protection against zombie master. Started once Master accepts active responsibility and
  starts taking over responsibilities. Allows a finite time window before giving up ownership.</div>
@@ -250,7 +250,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockList">
 <li class="blockList">
 <h4>TIMEOUT_KEY</h4>
-<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.258">TIMEOUT_KEY</a></pre>
+<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.259">TIMEOUT_KEY</a></pre>
 <div class="block">The amount of time in milliseconds to sleep before checking initialization status.</div>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
@@ -264,7 +264,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockList">
 <li class="blockList">
 <h4>TIMEOUT_DEFAULT</h4>
-<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.259">TIMEOUT_DEFAULT</a></pre>
+<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.260">TIMEOUT_DEFAULT</a></pre>
 </li>
 </ul>
 <a name="HALT_KEY">
@@ -273,7 +273,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockList">
 <li class="blockList">
 <h4>HALT_KEY</h4>
-<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.265">HALT_KEY</a></pre>
+<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.266">HALT_KEY</a></pre>
 <div class="block">When timeout expired and initialization has not complete, call <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/System.html?is-external=true#exit-int-" title="class or interface in java.lang"><code>System.exit(int)</code></a> when
  true, do nothing otherwise.</div>
 <dl>
@@ -288,7 +288,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockList">
 <li class="blockList">
 <h4>HALT_DEFAULT</h4>
-<pre>public static final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.266">HALT_DEFAULT</a></pre>
+<pre>public static final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.267">HALT_DEFAULT</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.master.HMaster.InitializationMonitor.HALT_DEFAULT">Constant Field Values</a></dd>
@@ -301,7 +301,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockList">
 <li class="blockList">
 <h4>master</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/HMaster.html" title="class in org.apache.hadoop.hbase.master">HMaster</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.268">master</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/master/HMaster.html" title="class in org.apache.hadoop.hbase.master">HMaster</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.269">master</a></pre>
 </li>
 </ul>
 <a name="timeout">
@@ -310,7 +310,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockList">
 <li class="blockList">
 <h4>timeout</h4>
-<pre>private final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.269">timeout</a></pre>
+<pre>private final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.270">timeout</a></pre>
 </li>
 </ul>
 <a name="haltOnTimeout">
@@ -319,7 +319,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockListLast">
 <li class="blockList">
 <h4>haltOnTimeout</h4>
-<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.270">haltOnTimeout</a></pre>
+<pre>private final&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.271">haltOnTimeout</a></pre>
 </li>
 </ul>
 </li>
@@ -336,7 +336,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockListLast">
 <li class="blockList">
 <h4>InitializationMonitor</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.273">InitializationMonitor</a>(<a href="../../../../../org/apache/hadoop/hbase/master/HMaster.html" title="class in org.apache.hadoop.hbase.master">HMaster</a>&nbsp;master)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.274">InitializationMonitor</a>(<a href="../../../../../org/apache/hadoop/hbase/master/HMaster.html" title="class in org.apache.hadoop.hbase.master">HMaster</a>&nbsp;master)</pre>
 <div class="block">Creates a Thread that monitors the <a href="../../../../../org/apache/hadoop/hbase/master/HMaster.html#isInitialized--"><code>HMaster.isInitialized()</code></a> state.</div>
 </li>
 </ul>
@@ -354,7 +354,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HasThread.html" tit
 <ul class="blockListLast">
 <li class="blockList">
 <h4>run</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.282">run</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html#line.283">run</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--" title="class or interface in java.lang">run</a></code>&nbsp;in interface&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true" title="class or interface in java.lang">Runnable</a></code></dd>
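
The InitializationMonitor described in the diff above is a watchdog thread: it sleeps for a configured interval, checks HMaster.isInitialized(), and, when configured to halt, calls System.exit(int) if the window expires before initialization completes (and does nothing otherwise). Below is a minimal sketch of tuning that behaviour through a plain Hadoop Configuration; the property names are assumptions inferred from the TIMEOUT_KEY and HALT_KEY constant names (the authoritative strings live in constant-values.html), and the class is illustrative only, not part of HMaster.

import org.apache.hadoop.conf.Configuration;

public class InitializationMonitorConfigSketch {
  // Assumed property names; verify against constant-values.html before relying on them.
  static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
  static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Milliseconds to sleep before each check of HMaster.isInitialized().
    conf.setLong(TIMEOUT_KEY, 15L * 60L * 1000L);
    // When true, the monitor calls System.exit(int) if the timeout expires before
    // initialization completes; when false it does nothing, per the javadoc above.
    conf.setBoolean(HALT_KEY, false);
    System.out.println(TIMEOUT_KEY + "=" + conf.getLong(TIMEOUT_KEY, -1L)
        + ", " + HALT_KEY + "=" + conf.getBoolean(HALT_KEY, false));
  }
}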

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index f556da2..dde8752 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -136,7 +136,7 @@
 </dl>
 <hr>
 <br>
-<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.2910">HMaster.MasterStoppedException</a>
+<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.2919">HMaster.MasterStoppedException</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/DoNotRetryIOException.html" title="class in org.apache.hadoop.hbase">DoNotRetryIOException</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
@@ -205,7 +205,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/DoNotRetryIOException.ht
 <ul class="blockListLast">
 <li class="blockList">
 <h4>MasterStoppedException</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html#line.2911">MasterStoppedException</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html#line.2920">MasterStoppedException</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index fbb7e79..11aebb1 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.439">HMaster.RedirectServlet</a>
+<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.440">HMaster.RedirectServlet</a>
 extends javax.servlet.http.HttpServlet</pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
@@ -243,7 +243,7 @@ extends javax.servlet.http.HttpServlet</pre>
 <ul class="blockList">
 <li class="blockList">
 <h4>serialVersionUID</h4>
-<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.440">serialVersionUID</a></pre>
+<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.441">serialVersionUID</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.master.HMaster.RedirectServlet.serialVersionUID">Constant Field Values</a></dd>
@@ -256,7 +256,7 @@ extends javax.servlet.http.HttpServlet</pre>
 <ul class="blockList">
 <li class="blockList">
 <h4>regionServerInfoPort</h4>
-<pre>private final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.441">regionServerInfoPort</a></pre>
+<pre>private final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.442">regionServerInfoPort</a></pre>
 </li>
 </ul>
 <a name="regionServerHostname">
@@ -265,7 +265,7 @@ extends javax.servlet.http.HttpServlet</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>regionServerHostname</h4>
-<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.442">regionServerHostname</a></pre>
+<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.443">regionServerHostname</a></pre>
 </li>
 </ul>
 </li>
@@ -282,7 +282,7 @@ extends javax.servlet.http.HttpServlet</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>RedirectServlet</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.448">RedirectServlet</a>(<a href="../../../../../org/apache/hadoop/hbase/http/InfoServer.html" title="class in org.apache.hadoop.hbase.http">InfoServer</a>&nbsp;infoServer,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.449">RedirectServlet</a>(<a href="../../../../../org/apache/hadoop/hbase/http/InfoServer.html" title="class in org.apache.hadoop.hbase.http">InfoServer</a>&nbsp;infoServer,
                        <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;hostname)</pre>
 <dl>
 <dt><span class="paramLabel">Parameters:</span></dt>
@@ -305,7 +305,7 @@ extends javax.servlet.http.HttpServlet</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>doGet</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.454">doGet</a>(javax.servlet.http.HttpServletRequest&nbsp;request,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html#line.455">doGet</a>(javax.servlet.http.HttpServletRequest&nbsp;request,
                   javax.servlet.http.HttpServletResponse&nbsp;response)
            throws javax.servlet.ServletException,
                   <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index 4a2c0d5..1aede1a 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>protected static interface <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.2330">HMaster.TableDescriptorGetter</a></pre>
+<pre>protected static interface <a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.html#line.2339">HMaster.TableDescriptorGetter</a></pre>
 <div class="block">Implement to return TableDescriptor after pre-checks</div>
 </li>
 </ul>
@@ -150,7 +150,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockListLast">
 <li class="blockList">
 <h4>get</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html#line.2331">get</a>()
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html#line.2340">get</a>()
              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="l

<TRUNCATED>
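
The hunk above (cut short by the archive) reworks hbck1's exclusive-lock handling: createFileWithRetries() loops on FSUtils.create() under a RetryCounter, checkAndMarkRunningHbck() now runs that callable inside a FutureTask bounded by hbase.hbck.lockfile.maxwaittime, and unlockHbck() closes and deletes the lock file afterwards. As a rough guide to the pattern, here is a minimal, self-contained sketch of the same retry-then-timeout idea using only java.nio and java.util.concurrent; the class name, method names and defaults below are invented for illustration and are not HBase APIs.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Sketch of the hbck lock-file pattern: retry the create a few times,
 * and bound the total wait with a FutureTask timeout. Illustrative only.
 */
public class LockFileSketch {

  /** Inner retry loop, analogous to createFileWithRetries(). */
  static Path createWithRetries(Path lockPath, int maxAttempts, long sleepMillis)
      throws IOException, InterruptedException {
    IOException lastFailure = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        // Fails with FileAlreadyExistsException if another process holds the lock.
        return Files.createFile(lockPath);
      } catch (IOException ioe) {
        lastFailure = ioe;
        System.out.println("Failed to create " + lockPath + ", try=" + attempt
            + " of " + maxAttempts);
        Thread.sleep(sleepMillis); // the real code backs off via RetryCounter
      }
    }
    throw lastFailure;
  }

  /** Outer timeout guard, analogous to checkAndMarkRunningHbck(). */
  static Path checkAndMarkRunning(Path lockPath, long timeoutSeconds) {
    FutureTask<Path> task = new FutureTask<Path>(() -> createWithRetries(lockPath, 5, 200L));
    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(task);
    try {
      return task.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (TimeoutException te) {
      task.cancel(true); // took too long to obtain the lock
      return null;
    } catch (ExecutionException ee) {
      return null;       // creation kept failing; caller treats null as "not locked"
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      return null;
    } finally {
      executor.shutdownNow();
    }
  }

  public static void main(String[] args) throws IOException {
    Path lock = Paths.get(System.getProperty("java.io.tmpdir"), "sketch-hbck.lock");
    Path held = checkAndMarkRunning(lock, 80L);
    System.out.println(held != null ? "lock acquired: " + held : "could not acquire lock");
    if (held != null) {
      Files.delete(held); // analogous to unlockHbck()
    }
  }
}

In HBaseFsck itself the attempt count, sleep interval and maximum wait come from the hbase.hbck.lockfile.* configuration keys visible further down in this diff, not from hard-coded arguments.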

[22/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>  

<TRUNCATED>
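
The (truncated) HBaseFsck diff above reworks the hbck lock-file machinery into static helpers: getTmpDir(conf), createLockRetryCounterFactory(conf), and checkAndMarkRunningHbck(conf, retryCounter), with the last now returning a Pair of the lock Path and the open FSDataOutputStream instead of relying on instance state, and the lock file itself now carries a note that an hbase-2.x Master writes it to block an hbase-1.x HBCK. The following caller-side sketch shows how those helpers fit together; it is a minimal illustration only, assuming the usual org.apache.hadoop.hbase.util package locations, a hypothetical HbckLockExample wrapper class, and an assumed lock-file name noted in the comments.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

public class HbckLockExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Retry policy for lock-file creation, driven by the
    // hbase.hbck.lockfile.* settings referenced in the diff.
    RetryCounterFactory retryFactory = HBaseFsck.createLockRetryCounterFactory(conf);
    RetryCounter retryCounter = retryFactory.create();

    // Try to create the hbck lock file under the HBase temp dir
    // (the file name constant is not shown above; assumed to be hbase-hbck.lock).
    // The stream is null when another hbck already holds the lock or the
    // bounded wait (hbase.hbck.lockfile.maxwaittime) expired.
    Pair<Path, FSDataOutputStream> lock =
        HBaseFsck.checkAndMarkRunningHbck(conf, retryCounter);
    Path lockPath = lock.getFirst();
    FSDataOutputStream lockStream = lock.getSecond();
    if (lockStream == null) {
      System.err.println("Could not obtain the hbck lock at " + lockPath + "; exiting.");
      return;
    }
    try {
      // ... exclusive hbck-style work would go here ...
    } finally {
      lockStream.close();
      // Mirror unlockHbck(): remove the lock file so later runs can proceed.
      lockPath.getFileSystem(conf).delete(lockPath, true);
    }
  }
}

Making the locking helpers static presumably lets an hbase-2.x Master (or other tooling) write and check the hbck1 lock file without constructing a full HBaseFsck instance.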

[36/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/checkstyle-aggregate.html
----------------------------------------------------------------------
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 236f443..2a0708a 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Checkstyle Results</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -284,7 +284,7 @@
 <td>3714</td>
 <td>0</td>
 <td>0</td>
-<td>15386</td></tr></table></div>
+<td>15385</td></tr></table></div>
 <div class="section">
 <h2><a name="Files"></a>Files</h2>
 <table border="0" class="table table-striped">
@@ -4739,4986 +4739,4981 @@
 <td>0</td>
 <td>1</td></tr>
 <tr class="a">
-<td><a href="#org.apache.hadoop.hbase.master.TestMaster.java">org/apache/hadoop/hbase/master/TestMaster.java</a></td>
-<td>0</td>
-<td>0</td>
-<td>1</td></tr>
-<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence.java">org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterFileSystem.java">org/apache/hadoop/hbase/master/TestMasterFileSystem.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterFileSystemWithWALDir.java">org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterMetrics.java">org/apache/hadoop/hbase/master/TestMasterMetrics.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterMetricsWrapper.java">org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterNoCluster.java">org/apache/hadoop/hbase/master/TestMasterNoCluster.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>20</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterOperationsForRegionReplicas.java">org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterStatusServlet.java">org/apache/hadoop/hbase/master/TestMasterStatusServlet.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestMasterTransitions.java">org/apache/hadoop/hbase/master/TestMasterTransitions.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestMirroringTableStateManager.java">org/apache/hadoop/hbase/master/TestMirroringTableStateManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestRegionPlacement.java">org/apache/hadoop/hbase/master/TestRegionPlacement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>19</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestRegionPlacement2.java">org/apache/hadoop/hbase/master/TestRegionPlacement2.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestRollingRestart.java">org/apache/hadoop/hbase/master/TestRollingRestart.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestSplitLogManager.java">org/apache/hadoop/hbase/master/TestSplitLogManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.TestTableStateManager.java">org/apache/hadoop/hbase/master/TestTableStateManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.TestWarmupRegion.java">org/apache/hadoop/hbase/master/TestWarmupRegion.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>19</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.AssignProcedure.java">org/apache/hadoop/hbase/master/assignment/AssignProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.AssignmentManager.java">org/apache/hadoop/hbase/master/assignment/AssignmentManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>13</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.GCMergedRegionsProcedure.java">org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>16</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.GCRegionProcedure.java">org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>31</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure.java">org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.MockMasterServices.java">org/apache/hadoop/hbase/master/assignment/MockMasterServices.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.RegionStateStore.java">org/apache/hadoop/hbase/master/assignment/RegionStateStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.RegionStates.java">org/apache/hadoop/hbase/master/assignment/RegionStates.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>16</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure.java">org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.TestAssignmentOnRSCrash.java">org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.TestRegionStates.java">org/apache/hadoop/hbase/master/assignment/TestRegionStates.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.assignment.TestSplitTableRegionProcedure.java">org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.BalancerChore.java">org/apache/hadoop/hbase/master/balancer/BalancerChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.BalancerTestBase.java">org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>74</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.java">org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>60</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.ClusterLoadState.java">org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.ClusterStatusChore.java">org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer.java">org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory.java">org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation.java">org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.RegionInfoComparator.java">org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.RegionLocationFinder.java">org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.ServerAndLoad.java">org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer.java">org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>33</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.java">org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>32</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestBaseLoadBalancer.java">org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>16</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestDefaultLoadBalancer.java">org/apache/hadoop/hbase/master/balancer/TestDefaultLoadBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>48</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestFavoredNodeTableImport.java">org/apache/hadoop/hbase/master/balancer/TestFavoredNodeTableImport.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestFavoredStochasticBalancerPickers.java">org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions.java">org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>13</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestServerAndLoad.java">org/apache/hadoop/hbase/master/balancer/TestServerAndLoad.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.balancer.TestStochasticLoadBalancer.java">org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>33</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.BaseFileCleanerDelegate.java">org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.java">org/apache/hadoop/hbase/master/cleaner/BaseLogCleanerDelegate.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.CleanerChore.java">org/apache/hadoop/hbase/master/cleaner/CleanerChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.FileCleanerDelegate.java">org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner.java">org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.TestCleanerChore.java">org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.TestHFileCleaner.java">org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.TestHFileLinkCleaner.java">org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.TestSnapshotFromMaster.java">org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner.java">org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner.java">org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.locking.LockManager.java">org/apache/hadoop/hbase/master/locking/LockManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.locking.LockProcedure.java">org/apache/hadoop/hbase/master/locking/LockProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.normalizer.EmptyNormalizationPlan.java">org/apache/hadoop/hbase/master/normalizer/EmptyNormalizationPlan.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.java">org/apache/hadoop/hbase/master/normalizer/NormalizationPlan.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore.java">org/apache/hadoop/hbase/master/normalizer/RegionNormalizerChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory.java">org/apache/hadoop/hbase/master/normalizer/RegionNormalizerFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.normalizer.TestSimpleRegionNormalizerOnCluster.java">org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure.java">org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure.java">org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure.java">org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure.java">org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure.java">org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>31</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure.java">org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>34</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.EnableTableProcedure.java">org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv.java">org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.java">org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.MasterProcedureSchedulerPerformanceEvaluation.java">org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility.java">org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.java">org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure.java">org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>21</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure.java">org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>12</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.ProcedureDescriber.java">org/apache/hadoop/hbase/master/procedure/ProcedureDescriber.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch.java">org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait.java">org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>47</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure.java">org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure.java">org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestDeleteColumnFamilyProcedureFromClient.java">org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestDeleteNamespaceProcedure.java">org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestMasterObserverPostCalls.java">org/apache/hadoop/hbase/master/procedure/TestMasterObserverPostCalls.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestMasterProcedureEvents.java">org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.java">org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestMasterProcedureSchedulerConcurrency.java">org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestMasterProcedureWalLease.java">org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestSafemodeBringsDownMaster.java">org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestTableDDLProcedureBase.java">org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestTableDescriptorModificationFromClient.java">org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TestWALProcedureStoreOnHDFS.java">org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure.java">org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.EnabledTableSnapshotHandler.java">org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.MasterSnapshotVerifier.java">org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.SnapshotFileCache.java">org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner.java">org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.SnapshotManager.java">org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>16</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.TakeSnapshotHandler.java">org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.TestSnapshotHFileCleaner.java">org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.master.snapshot.TestSnapshotManager.java">org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.CachedMobFile.java">org/apache/hadoop/hbase/mob/CachedMobFile.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.DefaultMobStoreCompactor.java">org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>8</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.DefaultMobStoreFlusher.java">org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner.java">org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.MobCacheConfig.java">org/apache/hadoop/hbase/mob/MobCacheConfig.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.MobConstants.java">org/apache/hadoop/hbase/mob/MobConstants.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.MobFile.java">org/apache/hadoop/hbase/mob/MobFile.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.MobFileCache.java">org/apache/hadoop/hbase/mob/MobFileCache.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.MobFileName.java">org/apache/hadoop/hbase/mob/MobFileName.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.MobTestUtil.java">org/apache/hadoop/hbase/mob/MobTestUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.MobUtils.java">org/apache/hadoop/hbase/mob/MobUtils.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>15</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.TestDefaultMobStoreFlusher.java">org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>35</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.TestExpiredMobFileCleaner.java">org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.compactions.MobCompactor.java">org/apache/hadoop/hbase/mob/compactions/MobCompactor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.java">org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor.java">org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mob.compactions.TestMobCompactor.java">org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.mob.compactions.TestPartitionedMobCompactor.java">org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.monitoring.LogMonitoring.java">org/apache/hadoop/hbase/monitoring/LogMonitoring.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer.java">org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl.java">org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.monitoring.MonitoredTaskImpl.java">org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.monitoring.StateDumpServlet.java">org/apache/hadoop/hbase/monitoring/StateDumpServlet.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.monitoring.TaskMonitor.java">org/apache/hadoop/hbase/monitoring/TaskMonitor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.monitoring.TestTaskMonitor.java">org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.monitoring.ThreadMonitoring.java">org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.mttr.IntegrationTestMTTR.java">org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.namespace.NamespaceStateManager.java">org/apache/hadoop/hbase/namespace/NamespaceStateManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.namespace.NamespaceTableAndRegionInfo.java">org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.namespace.TestNamespaceAuditor.java">org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.net.Address.java">org/apache/hadoop/hbase/net/Address.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.net.TestAddress.java">org/apache/hadoop/hbase/net/TestAddress.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.nio.ByteBuff.java">org/apache/hadoop/hbase/nio/ByteBuff.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>24</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.nio.MultiByteBuff.java">org/apache/hadoop/hbase/nio/MultiByteBuff.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>29</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.nio.SingleByteBuff.java">org/apache/hadoop/hbase/nio/SingleByteBuff.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.MasterProcedureManager.java">org/apache/hadoop/hbase/procedure/MasterProcedureManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.Procedure.java">org/apache/hadoop/hbase/procedure/Procedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>14</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.ProcedureCoordinator.java">org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>11</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs.java">org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.ProcedureManagerHost.java">org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.ProcedureMember.java">org/apache/hadoop/hbase/procedure/ProcedureMember.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>16</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs.java">org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.RegionServerProcedureManager.java">org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.SimpleMasterProcedureManager.java">org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.SimpleRSProcedureManager.java">org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.Subprocedure.java">org/apache/hadoop/hbase/procedure/Subprocedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.TestProcedureCoordinator.java">org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>11</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.TestProcedureDescriber.java">org/apache/hadoop/hbase/procedure/TestProcedureDescriber.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.TestProcedureMember.java">org/apache/hadoop/hbase/procedure/TestProcedureMember.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.TestZKProcedure.java">org/apache/hadoop/hbase/procedure/TestZKProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>27</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.TestZKProcedureControllers.java">org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator.java">org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs.java">org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.ZKProcedureUtil.java">org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.flush.FlushTableSubprocedure.java">org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager.java">org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager.java">org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.java">org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.BadProcedureException.java">org/apache/hadoop/hbase/procedure2/BadProcedureException.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.LockedResource.java">org/apache/hadoop/hbase/procedure2/LockedResource.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.Procedure.java">org/apache/hadoop/hbase/procedure2/Procedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.ProcedureDeque.java">org/apache/hadoop/hbase/procedure2/ProcedureDeque.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.ProcedureException.java">org/apache/hadoop/hbase/procedure2/ProcedureException.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.ProcedureExecutor.java">org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.ProcedureScheduler.java">org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.java">org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.ProcedureUtil.java">org/apache/hadoop/hbase/procedure2/ProcedureUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.java">org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.RemoteProcedureException.java">org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.RootProcedureState.java">org/apache/hadoop/hbase/procedure2/RootProcedureState.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.StateMachineProcedure.java">org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureExecution.java">org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureMetrics.java">org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureNonce.java">org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.java">org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureReplayOrder.java">org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureSchedulerConcurrency.java">org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureSuspended.java">org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestProcedureToString.java">org/apache/hadoop/hbase/procedure2/TestProcedureToString.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestStateMachineProcedure.java">org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.TestYieldProcedures.java">org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.ProcedureStore.java">org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase.java">org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker.java">org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.TestProcedureStoreTracker.java">org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFile.java">org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFormat.java">org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFormatReader.java">org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALLoaderPerformanceEvaluation.java">org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALPerformanceEvaluation.java">org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALPrettyPrinter.java">org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.TestStressWALProcedureStore.java">org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore.java">org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>12</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore.java">org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.procedure2.util.DelayedUtil.java">org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.procedure2.util.StringUtils.java">org/apache/hadoop/hbase/procedure2/util/StringUtils.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.protobuf.ProtobufMagic.java">org/apache/hadoop/hbase/protobuf/ProtobufMagic.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.protobuf.ProtobufMessageConverter.java">org/apache/hadoop/hbase/protobuf/ProtobufMessageConverter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.protobuf.ProtobufUtil.java">org/apache/hadoop/hbase/protobuf/ProtobufUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>115</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil.java">org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.protobuf.TestProtobufUtil.java">org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.protobuf.TestReplicationProtobuf.java">org/apache/hadoop/hbase/protobuf/TestReplicationProtobuf.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement.java">org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.AverageIntervalRateLimiter.java">org/apache/hadoop/hbase/quotas/AverageIntervalRateLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.DefaultOperationQuota.java">org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore.java">org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.FixedIntervalRateLimiter.java">org/apache/hadoop/hbase/quotas/FixedIntervalRateLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.GlobalQuotaSettings.java">org/apache/hadoop/hbase/quotas/GlobalQuotaSettings.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.GlobalQuotaSettingsImpl.java">org/apache/hadoop/hbase/quotas/GlobalQuotaSettingsImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.NamespaceQuotaSnapshotStore.java">org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.NoopOperationQuota.java">org/apache/hadoop/hbase/quotas/NoopOperationQuota.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.NoopQuotaLimiter.java">org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.OperationQuota.java">org/apache/hadoop/hbase/quotas/OperationQuota.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaCache.java">org/apache/hadoop/hbase/quotas/QuotaCache.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaLimiter.java">org/apache/hadoop/hbase/quotas/QuotaLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaLimiterFactory.java">org/apache/hadoop/hbase/quotas/QuotaLimiterFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaObserverChore.java">org/apache/hadoop/hbase/quotas/QuotaObserverChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaRetriever.java">org/apache/hadoop/hbase/quotas/QuotaRetriever.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaSettings.java">org/apache/hadoop/hbase/quotas/QuotaSettings.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.java">org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaState.java">org/apache/hadoop/hbase/quotas/QuotaState.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaTableUtil.java">org/apache/hadoop/hbase/quotas/QuotaTableUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.QuotaUtil.java">org/apache/hadoop/hbase/quotas/QuotaUtil.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>8</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.RateLimiter.java">org/apache/hadoop/hbase/quotas/RateLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>18</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.java">org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager.java">org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.java">org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceLimitSettings.java">org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore.java">org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.java">org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier.java">org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifier.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory.java">org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierForTest.java">org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierForTest.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcementFactory.java">org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.TableQuotaSnapshotStore.java">org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestFileSystemUtilizationChore.java">org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestNamespaceQuotaViolationStore.java">org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestQuotaFilter.java">org/apache/hadoop/hbase/quotas/TestQuotaFilter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestQuotaObserverChoreWithMiniCluster.java">org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestQuotaSettingsFactory.java">org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestQuotaStatusRPCs.java">org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestQuotaThrottle.java">org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestRateLimiter.java">org/apache/hadoop/hbase/quotas/TestRateLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestSpaceQuotas.java">org/apache/hadoop/hbase/quotas/TestSpaceQuotas.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestTableQuotaViolationStore.java">org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TestTablesWithQuotas.java">org/apache/hadoop/hbase/quotas/TestTablesWithQuotas.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.ThrottleSettings.java">org/apache/hadoop/hbase/quotas/ThrottleSettings.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.TimeBasedLimiter.java">org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.UserQuotaState.java">org/apache/hadoop/hbase/quotas/UserQuotaState.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.BaseViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/DefaultViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.DisableTableViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.NoInsertsViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/NoInsertsViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.NoWritesCompactionsViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/NoWritesCompactionsViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.NoWritesViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/NoWritesViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.quotas.policies.TestBulkLoadCheckingViolationPolicyEnforcement.java">org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.AbstractMemStore.java">org/apache/hadoop/hbase/regionserver/AbstractMemStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter.java">org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.AbstractTestDateTieredCompactionPolicy.java">org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.AnnotationReadingPriorityFunction.java">org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>16</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.BaseRowProcessor.java">org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.BusyRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/BusyRegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ByteBufferChunkKeyValue.java">org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CSLMImmutableSegment.java">org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CellArrayImmutableSegment.java">org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CellArrayMap.java">org/apache/hadoop/hbase/regionserver/CellArrayMap.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CellChunkMap.java">org/apache/hadoop/hbase/regionserver/CellChunkMap.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CellFlatMap.java">org/apache/hadoop/hbase/regionserver/CellFlatMap.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>11</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CellSet.java">org/apache/hadoop/hbase/regionserver/CellSet.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CellSink.java">org/apache/hadoop/hbase/regionserver/CellSink.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ChangedReadersObserver.java">org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.Chunk.java">org/apache/hadoop/hbase/regionserver/Chunk.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ChunkCreator.java">org/apache/hadoop/hbase/regionserver/ChunkCreator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompactSplit.java">org/apache/hadoop/hbase/regionserver/CompactSplit.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompactedHFilesDischargeHandler.java">org/apache/hadoop/hbase/regionserver/CompactedHFilesDischargeHandler.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger.java">org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompactingMemStore.java">org/apache/hadoop/hbase/regionserver/CompactingMemStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompactionPipeline.java">org/apache/hadoop/hbase/regionserver/CompactionPipeline.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompactionTool.java">org/apache/hadoop/hbase/regionserver/CompactionTool.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CompositeImmutableSegment.java">org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.CreateRandomStoreFile.java">org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.java">org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.java">org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.java">org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>31</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DefaultMemStore.java">org/apache/hadoop/hbase/regionserver/DefaultMemStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.java">org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DelegatingKeyValueScanner.java">org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DelimitedKeyPrefixRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.FavoredNodesForRegion.java">org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory.java">org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.FlushPolicyFactory.java">org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.FlushRequestListener.java">org/apache/hadoop/hbase/regionserver/FlushRequestListener.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.FlushRequester.java">org/apache/hadoop/hbase/regionserver/FlushRequester.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HMobStore.java">org/apache/hadoop/hbase/regionserver/HMobStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>8</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HRegion.java">org/apache/hadoop/hbase/regionserver/HRegion.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>213</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HRegionFileSystem.java">org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>47</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HRegionServer.java">org/apache/hadoop/hbase/regionserver/HRegionServer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>75</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine.java">org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HStore.java">org/apache/hadoop/hbase/regionserver/HStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>43</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HStoreFile.java">org/apache/hadoop/hbase/regionserver/HStoreFile.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HeapMemoryManager.java">org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>11</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.HeapMemoryTuner.java">org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ImmutableSegment.java">org/apache/hadoop/hbase/regionserver/ImmutableSegment.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.InternalScan.java">org/apache/hadoop/hbase/regionserver/InternalScan.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.InternalScanner.java">org/apache/hadoop/hbase/regionserver/InternalScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.KeyValueHeap.java">org/apache/hadoop/hbase/regionserver/KeyValueHeap.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>11</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.KeyValueScanner.java">org/apache/hadoop/hbase/regionserver/KeyValueScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.LeaseException.java">org/apache/hadoop/hbase/regionserver/LeaseException.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.Leases.java">org/apache/hadoop/hbase/regionserver/Leases.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.LogRoller.java">org/apache/hadoop/hbase/regionserver/LogRoller.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStore.java">org/apache/hadoop/hbase/regionserver/MemStore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreCompactor.java">org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreFlusher.java">org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>29</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreLAB.java">org/apache/hadoop/hbase/regionserver/MemStoreLAB.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreLABImpl.java">org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreMergerSegmentsIterator.java">org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreSegmentsIterator.java">org/apache/hadoop/hbase/regionserver/MemStoreSegmentsIterator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MemStoreSnapshot.java">org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MetricsRegion.java">org/apache/hadoop/hbase/regionserver/MetricsRegion.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MetricsRegionServer.java">org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperImpl.java">org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub.java">org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MetricsTable.java">org/apache/hadoop/hbase/regionserver/MetricsTable.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MetricsTableWrapperAggregateImpl.java">org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress.java">org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MobReferenceOnlyFilter.java">org/apache/hadoop/hbase/regionserver/MobReferenceOnlyFilter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.java">org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MutableOnlineRegions.java">org/apache/hadoop/hbase/regionserver/MutableOnlineRegions.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.MutableSegment.java">org/apache/hadoop/hbase/regionserver/MutableSegment.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.NoTagByteBufferChunkKeyValue.java">org/apache/hadoop/hbase/regionserver/NoTagByteBufferChunkKeyValue.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner.java">org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner.java">org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.OOMERegionServer.java">org/apache/hadoop/hbase/regionserver/OOMERegionServer.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.OnlineRegions.java">org/apache/hadoop/hbase/regionserver/OnlineRegions.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.OperationStatus.java">org/apache/hadoop/hbase/regionserver/OperationStatus.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RSDumpServlet.java">org/apache/hadoop/hbase/regionserver/RSDumpServlet.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RSRpcServices.java">org/apache/hadoop/hbase/regionserver/RSRpcServices.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>65</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RSStatusServlet.java">org/apache/hadoop/hbase/regionserver/RSStatusServlet.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.Region.java">org/apache/hadoop/hbase/regionserver/Region.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>25</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionAsTable.java">org/apache/hadoop/hbase/regionserver/RegionAsTable.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>41</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.java">org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>79</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionScanner.java">org/apache/hadoop/hbase/regionserver/RegionScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionServerAccounting.java">org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost.java">org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>8</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionServerServices.java">org/apache/hadoop/hbase/regionserver/RegionServerServices.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionServicesForStores.java">org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>6</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.java">org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>5</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ReplicationSinkService.java">org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ReversedKeyValueHeap.java">org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ReversedRegionScannerImpl.java">org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ReversedStoreScanner.java">org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RowProcessor.java">org/apache/hadoop/hbase/regionserver/RowProcessor.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory.java">org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ScanInfo.java">org/apache/hadoop/hbase/regionserver/ScanInfo.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ScanOptions.java">org/apache/hadoop/hbase/regionserver/ScanOptions.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ScannerContext.java">org/apache/hadoop/hbase/regionserver/ScannerContext.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>9</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ScannerIdGenerator.java">org/apache/hadoop/hbase/regionserver/ScannerIdGenerator.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.java">org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.Segment.java">org/apache/hadoop/hbase/regionserver/Segment.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.SegmentFactory.java">org/apache/hadoop/hbase/regionserver/SegmentFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>10</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.SegmentScanner.java">org/apache/hadoop/hbase/regionserver/SegmentScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ServerNonceManager.java">org/apache/hadoop/hbase/regionserver/ServerNonceManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>8</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ShipperListener.java">org/apache/hadoop/hbase/regionserver/ShipperListener.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.ShutdownHook.java">org/apache/hadoop/hbase/regionserver/ShutdownHook.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>7</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory.java">org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.SplitRequest.java">org/apache/hadoop/hbase/regionserver/SplitRequest.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy.java">org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.Store.java">org/apache/hadoop/hbase/regionserver/Store.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFileComparators.java">org/apache/hadoop/hbase/regionserver/StoreFileComparators.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFileInfo.java">org/apache/hadoop/hbase/regionserver/StoreFileInfo.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>18</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFileManager.java">org/apache/hadoop/hbase/regionserver/StoreFileManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFileReader.java">org/apache/hadoop/hbase/regionserver/StoreFileReader.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFileScanner.java">org/apache/hadoop/hbase/regionserver/StoreFileScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>12</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFileWriter.java">org/apache/hadoop/hbase/regionserver/StoreFileWriter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>13</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFlushContext.java">org/apache/hadoop/hbase/regionserver/StoreFlushContext.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreFlusher.java">org/apache/hadoop/hbase/regionserver/StoreFlusher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>4</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreScanner.java">org/apache/hadoop/hbase/regionserver/StoreScanner.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>21</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StoreUtils.java">org/apache/hadoop/hbase/regionserver/StoreUtils.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StorefileRefresherChore.java">org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>3</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter.java">org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>2</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StripeStoreConfig.java">org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StripeStoreEngine.java">org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.java">org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>15</td></tr>
-<tr class="a">
+<tr class="b">
 <td><a href="#org.apache.hadoop.hbase.regionserver.StripeStoreFlusher.java">org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java</a></td>
 <td>0</td>
 <td>0</td>
 <td>1</td></tr>
-<tr class="b">
+<tr class="a">
 <td><a href="#org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java">org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java</a></t

<TRUNCATED>

[30/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
index 8d4e3b5..422ad2f 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":9,"i25":9,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":9,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":9,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":9,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":9,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,
 "i110":10,"i111":10,"i112":9,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":9,"i24":9,"i25":10,"i26":9,"i27":9,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":9,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":9,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":9,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":9,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":9,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i11
 0":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":9,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,11 +120,14 @@ var activeTableTab = "activeTableTab";
 <br>
 <pre>@InterfaceAudience.LimitedPrivate(value="Tools")
  @InterfaceStability.Evolving
-public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.212">HBaseFsck</a>
+public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.216">HBaseFsck</a>
 extends org.apache.hadoop.conf.Configured
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true" title="class or interface in java.io">Closeable</a></pre>
 <div class="block">HBaseFsck (hbck) is a tool for checking and repairing region consistency and
- table integrity problems in a corrupted HBase.
+ table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
+ work with hbase-2.x; it can read state but is not allowed to change state, i.e. it cannot effect 'repair'.
+ See hbck2 (HBASE-19121) for an hbck tool for hbase-2.x.
+
  <p>
  Region consistency checks verify that hbase:meta, region deployment on region
  servers and the state of data in HDFS (.regioninfo files) all are in
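The class description in the hunk above can be made concrete with a minimal read-only driver. The sketch below is illustrative only: it uses members that appear on this page (setDisplayFullReport(), onlineHbck(), getRetCode() and the Closeable contract), while the single-argument Configuration constructor is an assumption not documented in this diff, so verify it against the HBase version actually in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckReadOnlyReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int ret;
    // Assumed constructor taking a bare Configuration; check the exact
    // signatures available in the deployed HBase version.
    try (HBaseFsck hbck = new HBaseFsck(conf)) {
      HBaseFsck.setDisplayFullReport(); // print the full fsck report
      ret = hbck.onlineHbck();          // contacts the master, prints cluster-wide information
    }
    System.exit(ret);
  }
}

On an hbase-2.x cluster this stays within the read-only behaviour noted above; the repair setters further down this method table are the parts that hbck2 replaces.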
@@ -193,7 +196,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></span></code>&nbsp;</td>
 </tr>
 <tr class="altColor">
-<td class="colFirst"><code>private class&nbsp;</code></td>
+<td class="colFirst"><code>private static class&nbsp;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></span></code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
@@ -447,8 +450,10 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#forceExclusive">forceExclusive</a></span></code>&nbsp;</td>
 </tr>
 <tr class="rowColor">
-<td class="colFirst"><code>private static <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#HBCK_LOCK_FILE">HBCK_LOCK_FILE</a></span></code>&nbsp;</td>
+<td class="colFirst"><code>static <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a></code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#HBCK_LOCK_FILE">HBCK_LOCK_FILE</a></span></code>
+<div class="block">Here is where hbase-1.x used to default the lock for hbck1.</div>
+</td>
 </tr>
 <tr class="altColor">
 <td class="colFirst"><code>private org.apache.hadoop.fs.Path</code></td>
@@ -670,8 +675,9 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 </td>
 </tr>
 <tr id="i7" class="rowColor">
-<td class="colFirst"><code>private org.apache.hadoop.fs.FSDataOutputStream</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#checkAndMarkRunningHbck--">checkAndMarkRunningHbck</a></span>()</code>
+<td class="colFirst"><code>static <a href="../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;org.apache.hadoop.fs.Path,org.apache.hadoop.fs.FSDataOutputStream&gt;</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#checkAndMarkRunningHbck-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">checkAndMarkRunningHbck</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+                       <a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>
 <div class="block">This method maintains a lock using a file.</div>
 </td>
 </tr>
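The "maintains a lock using a file" behaviour in the row above is easiest to picture as create-if-absent with retries against the Hadoop FileSystem API. The following is only a rough sketch of that pattern; the class and method names are invented for illustration, and this is not the HBaseFsck implementation, which additionally returns the lock path and drives the retries through a RetryCounter.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class FileLockSketch {
  // Try to take an exclusive lock by creating a marker file that must not already exist.
  // The caller keeps the returned stream open and deletes the file to "unlock".
  public static FSDataOutputStream tryLock(Configuration conf, Path lockFile, int attempts)
      throws IOException, InterruptedException {
    FileSystem fs = lockFile.getFileSystem(conf);
    for (int i = 0; i < attempts; i++) {
      try {
        return fs.create(lockFile, false); // overwrite=false fails if the file already exists
      } catch (IOException alreadyLocked) {
        Thread.sleep(1000L);               // back off before retrying
      }
    }
    throw new IOException("Could not create lock file " + lockFile);
  }
}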
@@ -747,27 +753,35 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createHFileCorruptionChecker-boolean-">createHFileCorruptionChecker</a></span>(boolean&nbsp;sidelineCorruptHFiles)</code>&nbsp;</td>
 </tr>
 <tr id="i21" class="rowColor">
+<td class="colFirst"><code>static <a href="../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a></code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createLockRetryCounterFactory-org.apache.hadoop.conf.Configuration-">createLockRetryCounterFactory</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
+</tr>
+<tr id="i22" class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegion.html" title="class in org.apache.hadoop.hbase.regionserver">HRegion</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createNewMeta-java.lang.String-">createNewMeta</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;walFactoryID)</code>
 <div class="block">This borrows code from MasterFileSystem.bootstrap().</div>
 </td>
 </tr>
-<tr id="i22" class="altColor">
+<tr id="i23" class="rowColor">
 <td class="colFirst"><code>private static <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true" title="class or interface in java.util.concurrent">ExecutorService</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createThreadPool-org.apache.hadoop.conf.Configuration-">createThreadPool</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
 </tr>
-<tr id="i23" class="rowColor">
+<tr id="i24" class="altColor">
+<td class="colFirst"><code>private static <a href="../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a></code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createZnodeRetryCounterFactory-org.apache.hadoop.conf.Configuration-">createZnodeRetryCounterFactory</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
+</tr>
+<tr id="i25" class="rowColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/zookeeper/ZKWatcher.html" title="class in org.apache.hadoop.hbase.zookeeper">ZKWatcher</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createZooKeeperWatcher--">createZooKeeperWatcher</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i24" class="altColor">
+<tr id="i26" class="altColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#debugLsr-org.apache.hadoop.conf.Configuration-org.apache.hadoop.fs.Path-">debugLsr</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
         org.apache.hadoop.fs.Path&nbsp;p)</code>
 <div class="block">ls -r for debugging purposes</div>
 </td>
 </tr>
-<tr id="i25" class="rowColor">
+<tr id="i27" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#debugLsr-org.apache.hadoop.conf.Configuration-org.apache.hadoop.fs.Path-org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter-">debugLsr</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
         org.apache.hadoop.fs.Path&nbsp;p,
@@ -775,38 +789,38 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <div class="block">ls -r for debugging purposes</div>
 </td>
 </tr>
-<tr id="i26" class="altColor">
+<tr id="i28" class="altColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#debugLsr-org.apache.hadoop.fs.Path-">debugLsr</a></span>(org.apache.hadoop.fs.Path&nbsp;p)</code>
 <div class="block">ls -r for debugging purposes</div>
 </td>
 </tr>
-<tr id="i27" class="rowColor">
+<tr id="i29" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#deleteMetaRegion-byte:A-">deleteMetaRegion</a></span>(byte[]&nbsp;metaKey)</code>
 <div class="block">Deletes region from meta table</div>
 </td>
 </tr>
-<tr id="i28" class="altColor">
+<tr id="i30" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#deleteMetaRegion-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">deleteMetaRegion</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>
 <div class="block">Deletes region from meta table</div>
 </td>
 </tr>
-<tr id="i29" class="rowColor">
+<tr id="i31" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#dumpOverlapProblems-org.apache.hbase.thirdparty.com.google.common.collect.Multimap-">dumpOverlapProblems</a></span>(org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;regions)</code>&nbsp;</td>
 </tr>
-<tr id="i30" class="altColor">
+<tr id="i32" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#dumpSidelinedRegions-java.util.Map-">dumpSidelinedRegions</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;org.apache.hadoop.fs.Path,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;regions)</code>&nbsp;</td>
 </tr>
-<tr id="i31" class="rowColor">
+<tr id="i33" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#exec-java.util.concurrent.ExecutorService-java.lang.String:A-">exec</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true" title="class or interface in java.util.concurrent">ExecutorService</a>&nbsp;exec,
     <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</code>&nbsp;</td>
 </tr>
-<tr id="i32" class="altColor">
+<tr id="i34" class="altColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#fabricateTableInfo-org.apache.hadoop.hbase.util.FSTableDescriptors-org.apache.hadoop.hbase.TableName-java.util.Set-">fabricateTableInfo</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/FSTableDescriptors.html" title="class in org.apache.hadoop.hbase.util">FSTableDescriptors</a>&nbsp;fstd,
                   <a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;tableName,
@@ -815,84 +829,84 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
  1.</div>
 </td>
 </tr>
-<tr id="i33" class="rowColor">
+<tr id="i35" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#fixEmptyMetaCells--">fixEmptyMetaCells</a></span>()</code>
 <div class="block">To fix the empty REGIONINFO_QUALIFIER rows from hbase:meta <br></div>
 </td>
 </tr>
-<tr id="i34" class="altColor">
+<tr id="i36" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#fixOrphanTables--">fixOrphanTables</a></span>()</code>
 <div class="block">To fix orphan table by creating a .tableinfo file under tableDir <br>
  1.</div>
 </td>
 </tr>
-<tr id="i35" class="rowColor">
+<tr id="i37" class="rowColor">
 <td class="colFirst"><code>private <a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/Put.html" title="class in org.apache.hadoop.hbase.client">Put</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#generatePuts-java.util.SortedMap-">generatePuts</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;&nbsp;tablesInfo)</code>
 <div class="block">Generate set of puts to add to new meta.</div>
 </td>
 </tr>
-<tr id="i36" class="altColor">
+<tr id="i38" class="altColor">
 <td class="colFirst"><code>private <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getColumnFamilyList-java.util.Set-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">getColumnFamilyList</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt;&nbsp;columns,
                    <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi)</code>
 <div class="block">To get the column family list according to the column family dirs</div>
 </td>
 </tr>
-<tr id="i37" class="rowColor">
+<tr id="i39" class="rowColor">
 <td class="colFirst"><code>(package private) static <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getErrorReporter-org.apache.hadoop.conf.Configuration-">getErrorReporter</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
 </tr>
-<tr id="i38" class="altColor">
+<tr id="i40" class="altColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getErrors--">getErrors</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i39" class="rowColor">
+<tr id="i41" class="rowColor">
 <td class="colFirst"><code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getHFilecorruptionChecker--">getHFilecorruptionChecker</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i40" class="altColor">
+<tr id="i42" class="altColor">
 <td class="colFirst"><code>(package private) <a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getIncludedTables--">getIncludedTables</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i41" class="rowColor">
+<tr id="i43" class="rowColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getMaxMerge--">getMaxMerge</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i42" class="altColor">
+<tr id="i44" class="altColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getMaxOverlapsToSideline--">getMaxOverlapsToSideline</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i43" class="rowColor">
+<tr id="i45" class="rowColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getMetaRegionServerName-int-">getMetaRegionServerName</a></span>(int&nbsp;replicaId)</code>&nbsp;</td>
 </tr>
-<tr id="i44" class="altColor">
+<tr id="i46" class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getOrCreateInfo-java.lang.String-">getOrCreateInfo</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;name)</code>
 <div class="block">Gets the entry in regionInfo corresponding to the the given encoded
  region name.</div>
 </td>
 </tr>
-<tr id="i45" class="rowColor">
+<tr id="i47" class="rowColor">
 <td class="colFirst"><code>org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getOverlapGroups-org.apache.hadoop.hbase.TableName-">getOverlapGroups</a></span>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;table)</code>&nbsp;</td>
 </tr>
-<tr id="i46" class="altColor">
+<tr id="i48" class="altColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getRetCode--">getRetCode</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i47" class="rowColor">
+<tr id="i49" class="rowColor">
 <td class="colFirst"><code>private org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getSidelineDir--">getSidelineDir</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i48" class="altColor">
+<tr id="i50" class="altColor">
 <td class="colFirst"><code>(package private) <a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>[]</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getTableDescriptors-java.util.List-">getTableDescriptors</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&gt;&nbsp;tableNames)</code>&nbsp;</td>
 </tr>
-<tr id="i49" class="rowColor">
+<tr id="i51" class="rowColor">
 <td class="colFirst"><code>(package private) <a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>[]</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getTables-java.util.concurrent.atomic.AtomicInteger-">getTables</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicInteger</a>&nbsp;numSkipped)</code>
 <div class="block">Return a list of user-space table names whose metadata have not been
@@ -902,430 +916,434 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
  milliseconds specified by timelag, then the table is a candidate to be returned.</div>
 </td>
 </tr>
-<tr id="i50" class="altColor">
+<tr id="i52" class="altColor">
+<td class="colFirst"><code>static org.apache.hadoop.fs.Path</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#getTmpDir-org.apache.hadoop.conf.Configuration-">getTmpDir</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
+</tr>
+<tr id="i53" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#includeTable-org.apache.hadoop.hbase.TableName-">includeTable</a></span>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;table)</code>&nbsp;</td>
 </tr>
-<tr id="i51" class="rowColor">
+<tr id="i54" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isExclusive--">isExclusive</a></span>()</code>
 <div class="block">Only one instance of hbck can modify HBase at a time.</div>
 </td>
 </tr>
-<tr id="i52" class="altColor">
+<tr id="i55" class="rowColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isOptionsSupported-java.lang.String:A-">isOptionsSupported</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</code>&nbsp;</td>
 </tr>
-<tr id="i53" class="rowColor">
+<tr id="i56" class="altColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isTableDisabled-org.apache.hadoop.hbase.TableName-">isTableDisabled</a></span>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;tableName)</code>
 <div class="block">Check if the specified region's table is disabled.</div>
 </td>
 </tr>
-<tr id="i54" class="altColor">
+<tr id="i57" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#isTableIncluded-org.apache.hadoop.hbase.TableName-">isTableIncluded</a></span>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;table)</code>
 <div class="block">Only check/fix tables specified by the list,
  Empty list means all tables are included.</div>
 </td>
 </tr>
-<tr id="i55" class="rowColor">
+<tr id="i58" class="altColor">
 <td class="colFirst"><code>static byte[]</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#keyOnly-byte:A-">keyOnly</a></span>(byte[]&nbsp;b)</code>&nbsp;</td>
 </tr>
-<tr id="i56" class="altColor">
+<tr id="i59" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadDeployedRegions--">loadDeployedRegions</a></span>()</code>
 <div class="block">Get deployed regions according to the region servers.</div>
 </td>
 </tr>
-<tr id="i57" class="rowColor">
+<tr id="i60" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadHdfsRegionDirs--">loadHdfsRegionDirs</a></span>()</code>
 <div class="block">Scan HDFS for all regions, recording their information into
  regionInfoMap</div>
 </td>
 </tr>
-<tr id="i58" class="altColor">
+<tr id="i61" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadHdfsRegioninfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">loadHdfsRegioninfo</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi)</code>
 <div class="block">Read the .regioninfo file from the file system.</div>
 </td>
 </tr>
-<tr id="i59" class="rowColor">
+<tr id="i62" class="altColor">
 <td class="colFirst"><code>private <a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadHdfsRegionInfos--">loadHdfsRegionInfos</a></span>()</code>
 <div class="block">Populate hbi's from regionInfos loaded from file system.</div>
 </td>
 </tr>
-<tr id="i60" class="altColor">
+<tr id="i63" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadMetaEntries--">loadMetaEntries</a></span>()</code>
 <div class="block">Scan hbase:meta, adding all regions found to the regionInfo map.</div>
 </td>
 </tr>
-<tr id="i61" class="rowColor">
+<tr id="i64" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadTableInfosForTablesWithNoRegion--">loadTableInfosForTablesWithNoRegion</a></span>()</code>
 <div class="block">Loads table info's for tables that may not have been included, since there are no
  regions reported for the table, but table dir is there in hdfs</div>
 </td>
 </tr>
-<tr id="i62" class="altColor">
+<tr id="i65" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#loadTableStates--">loadTableStates</a></span>()</code>
 <div class="block">Load the list of disabled tables in ZK into local set.</div>
 </td>
 </tr>
-<tr id="i63" class="rowColor">
+<tr id="i66" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#logParallelMerge--">logParallelMerge</a></span>()</code>
 <div class="block">Log an appropriate message about whether or not overlapping merges are computed in parallel.</div>
 </td>
 </tr>
-<tr id="i64" class="altColor">
+<tr id="i67" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#main-java.lang.String:A-">main</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)</code>
 <div class="block">Main program</div>
 </td>
 </tr>
-<tr id="i65" class="rowColor">
+<tr id="i68" class="altColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#mergeRegionDirs-org.apache.hadoop.fs.Path-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">mergeRegionDirs</a></span>(org.apache.hadoop.fs.Path&nbsp;targetRegionDir,
                <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;contained)</code>
 <div class="block">Merge hdfs data by moving from contained HbckInfo into targetRegionDir.</div>
 </td>
 </tr>
-<tr id="i66" class="altColor">
+<tr id="i69" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offline-byte:A-">offline</a></span>(byte[]&nbsp;regionName)</code>
 <div class="block">This backwards-compatibility wrapper for permanently offlining a region
  that should not be alive.</div>
 </td>
 </tr>
-<tr id="i67" class="rowColor">
+<tr id="i70" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offlineHdfsIntegrityRepair--">offlineHdfsIntegrityRepair</a></span>()</code>
 <div class="block">This repair method analyzes hbase data in hdfs and repairs it to satisfy
  the table integrity rules.</div>
 </td>
 </tr>
-<tr id="i68" class="altColor">
+<tr id="i71" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offlineHLinkFileRepair--">offlineHLinkFileRepair</a></span>()</code>
 <div class="block">Scan all the store file names to find any lingering HFileLink files,
  which refer to some non-existing files.</div>
 </td>
 </tr>
-<tr id="i69" class="rowColor">
+<tr id="i72" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#offlineReferenceFileRepair--">offlineReferenceFileRepair</a></span>()</code>
 <div class="block">Scan all the store file names to find any lingering reference files,
  which refer to some non-existing files.</div>
 </td>
 </tr>
-<tr id="i70" class="altColor">
+<tr id="i73" class="rowColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#onlineConsistencyRepair--">onlineConsistencyRepair</a></span>()</code>
 <div class="block">This repair method requires the cluster to be online since it contacts
  region servers and the masters.</div>
 </td>
 </tr>
-<tr id="i71" class="rowColor">
+<tr id="i74" class="altColor">
 <td class="colFirst"><code>int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#onlineHbck--">onlineHbck</a></span>()</code>
 <div class="block">Contacts the master and prints out cluster-wide information</div>
 </td>
 </tr>
-<tr id="i72" class="altColor">
+<tr id="i75" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#preCheckPermission--">preCheckPermission</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i73" class="rowColor">
+<tr id="i76" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#printTableSummary-java.util.SortedMap-">printTableSummary</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;&nbsp;tablesInfo)</code>
 <div class="block">Prints summary of all tables found on the system.</div>
 </td>
 </tr>
-<tr id="i74" class="altColor">
+<tr id="i77" class="rowColor">
 <td class="colFirst"><code>protected <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#printUsageAndExit--">printUsageAndExit</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i75" class="rowColor">
+<tr id="i78" class="altColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#processRegionServers-java.util.Collection-">processRegionServers</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt;&nbsp;regionServerList)</code>
 <div class="block">Contacts each regionserver and fetches metadata about regions.</div>
 </td>
 </tr>
-<tr id="i76" class="altColor">
+<tr id="i79" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#rebuildMeta-boolean-">rebuildMeta</a></span>(boolean&nbsp;fix)</code>
 <div class="block">Rebuilds meta from information in hdfs/fs.</div>
 </td>
 </tr>
-<tr id="i77" class="rowColor">
+<tr id="i80" class="altColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#recordMetaRegion--">recordMetaRegion</a></span>()</code>
 <div class="block">Record the location of the hbase:meta region as found in ZooKeeper.</div>
 </td>
 </tr>
-<tr id="i78" class="altColor">
+<tr id="i81" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#removeHBCKMetaRecoveryWALDir-java.lang.String-">removeHBCKMetaRecoveryWALDir</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;walFactoryId)</code>
 <div class="block">Removes the empty Meta recovery WAL directory.</div>
 </td>
 </tr>
-<tr id="i79" class="rowColor">
+<tr id="i82" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#reportEmptyMetaCells--">reportEmptyMetaCells</a></span>()</code>
 <div class="block">TODO -- need to add tests for this.</div>
 </td>
 </tr>
-<tr id="i80" class="altColor">
+<tr id="i83" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#reportTablesInFlux--">reportTablesInFlux</a></span>()</code>
 <div class="block">TODO -- need to add tests for this.</div>
 </td>
 </tr>
-<tr id="i81" class="rowColor">
+<tr id="i84" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#resetSplitParent-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">resetSplitParent</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>
 <div class="block">Reset the split parent region info in meta table</div>
 </td>
 </tr>
-<tr id="i82" class="altColor">
+<tr id="i85" class="rowColor">
 <td class="colFirst"><code>private int</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#restoreHdfsIntegrity--">restoreHdfsIntegrity</a></span>()</code>
 <div class="block">This method determines if there are table integrity errors in HDFS.</div>
 </td>
 </tr>
-<tr id="i83" class="rowColor">
+<tr id="i86" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setCheckHdfs-boolean-">setCheckHdfs</a></span>(boolean&nbsp;checking)</code>&nbsp;</td>
 </tr>
-<tr id="i84" class="altColor">
+<tr id="i87" class="rowColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setCheckMetaOnly--">setCheckMetaOnly</a></span>()</code>
 <div class="block">Set hbase:meta check mode.</div>
 </td>
 </tr>
-<tr id="i85" class="rowColor">
+<tr id="i88" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setCleanReplicationBarrier-boolean-">setCleanReplicationBarrier</a></span>(boolean&nbsp;shouldClean)</code>&nbsp;</td>
 </tr>
-<tr id="i86" class="altColor">
+<tr id="i89" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setCleanReplicationBarrierTable-java.lang.String-">setCleanReplicationBarrierTable</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;cleanReplicationBarrierTable)</code>&nbsp;</td>
 </tr>
-<tr id="i87" class="rowColor">
+<tr id="i90" class="altColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setDisplayFullReport--">setDisplayFullReport</a></span>()</code>
 <div class="block">Display the full report from fsck.</div>
 </td>
 </tr>
-<tr id="i88" class="altColor">
+<tr id="i91" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixAssignments-boolean-">setFixAssignments</a></span>(boolean&nbsp;shouldFix)</code>
 <div class="block">Fix inconsistencies found by fsck.</div>
 </td>
 </tr>
-<tr id="i89" class="rowColor">
+<tr id="i92" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixEmptyMetaCells-boolean-">setFixEmptyMetaCells</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i90" class="altColor">
+<tr id="i93" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHdfsHoles-boolean-">setFixHdfsHoles</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i91" class="rowColor">
+<tr id="i94" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHdfsOrphans-boolean-">setFixHdfsOrphans</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i92" class="altColor">
+<tr id="i95" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHdfsOverlaps-boolean-">setFixHdfsOverlaps</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i93" class="rowColor">
+<tr id="i96" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixHFileLinks-boolean-">setFixHFileLinks</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i94" class="altColor">
+<tr id="i97" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixMeta-boolean-">setFixMeta</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i95" class="rowColor">
+<tr id="i98" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixReferenceFiles-boolean-">setFixReferenceFiles</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i96" class="altColor">
+<tr id="i99" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixReplication-boolean-">setFixReplication</a></span>(boolean&nbsp;shouldFix)</code>
 <div class="block">Set replication fix mode.</div>
 </td>
 </tr>
-<tr id="i97" class="rowColor">
+<tr id="i100" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixSplitParents-boolean-">setFixSplitParents</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i98" class="altColor">
+<tr id="i101" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixTableOrphans-boolean-">setFixTableOrphans</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i99" class="rowColor">
+<tr id="i102" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setFixVersionFile-boolean-">setFixVersionFile</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i100" class="altColor">
+<tr id="i103" class="rowColor">
 <td class="colFirst"><code>static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setForceExclusive--">setForceExclusive</a></span>()</code>
 <div class="block">Set exclusive mode.</div>
 </td>
 </tr>
-<tr id="i101" class="rowColor">
+<tr id="i104" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setHFileCorruptionChecker-org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker-">setHFileCorruptionChecker</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a>&nbsp;hfcc)</code>&nbsp;</td>
 </tr>
-<tr id="i102" class="altColor">
+<tr id="i105" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setIgnorePreCheckPermission-boolean-">setIgnorePreCheckPermission</a></span>(boolean&nbsp;ignorePreCheckPermission)</code>&nbsp;</td>
 </tr>
-<tr id="i103" class="rowColor">
+<tr id="i106" class="altColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setMasterInMaintenanceMode--">setMasterInMaintenanceMode</a></span>()</code>
 <div class="block">This method maintains an ephemeral znode.</div>
 </td>
 </tr>
-<tr id="i104" class="altColor">
+<tr id="i107" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setMaxMerge-int-">setMaxMerge</a></span>(int&nbsp;mm)</code>&nbsp;</td>
 </tr>
-<tr id="i105" class="rowColor">
+<tr id="i108" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setMaxOverlapsToSideline-int-">setMaxOverlapsToSideline</a></span>(int&nbsp;mo)</code>&nbsp;</td>
 </tr>
-<tr id="i106" class="altColor">
+<tr id="i109" class="rowColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setRegionBoundariesCheck--">setRegionBoundariesCheck</a></span>()</code>
 <div class="block">Set region boundaries check mode.</div>
 </td>
 </tr>
-<tr id="i107" class="rowColor">
+<tr id="i110" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setRemoveParents-boolean-">setRemoveParents</a></span>(boolean&nbsp;shouldFix)</code>&nbsp;</td>
 </tr>
-<tr id="i108" class="altColor">
+<tr id="i111" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setRetCode-int-">setRetCode</a></span>(int&nbsp;code)</code>&nbsp;</td>
 </tr>
-<tr id="i109" class="rowColor">
+<tr id="i112" class="altColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setShouldRerun--">setShouldRerun</a></span>()</code>
 <div class="block">Check if we should rerun fsck again.</div>
 </td>
 </tr>
-<tr id="i110" class="altColor">
+<tr id="i113" class="rowColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setSidelineBigOverlaps-boolean-">setSidelineBigOverlaps</a></span>(boolean&nbsp;sbo)</code>&nbsp;</td>
 </tr>
-<tr id="i111" class="rowColor">
+<tr id="i114" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setSidelineDir-java.lang.String-">setSidelineDir</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;sidelineDir)</code>&nbsp;</td>
 </tr>
-<tr id="i112" class="altColor">
+<tr id="i115" class="rowColor">
 <td class="colFirst"><code>(package private) static void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setSummary--">setSummary</a></span>()</code>
 <div class="block">Set summary mode.</div>
 </td>
 </tr>
-<tr id="i113" class="rowColor">
+<tr id="i116" class="altColor">
 <td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#setTimeLag-long-">setTimeLag</a></span>(long&nbsp;seconds)</code>
 <div class="block">We are interested in only those tables that have not changed their state in
  hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag</div>
 </td>
 </tr>
-<tr id="i114" class="altColor">
+<tr id="i117" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldCheckHdfs--">shouldCheckHdfs</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i115" class="rowColor">
+<tr id="i118" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixAssignments--">shouldFixAssignments</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i116" class="altColor">
+<tr id="i119" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixEmptyMetaCells--">shouldFixEmptyMetaCells</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i117" class="rowColor">
+<tr id="i120" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHdfsHoles--">shouldFixHdfsHoles</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i118" class="altColor">
+<tr id="i121" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHdfsOrphans--">shouldFixHdfsOrphans</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i119" class="rowColor">
+<tr id="i122" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHdfsOverlaps--">shouldFixHdfsOverlaps</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i120" class="altColor">
+<tr id="i123" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixHFileLinks--">shouldFixHFileLinks</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i121" class="rowColor">
+<tr id="i124" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixMeta--">shouldFixMeta</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i122" class="altColor">
+<tr id="i125" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixReferenceFiles--">shouldFixReferenceFiles</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i123" class="rowColor">
+<tr id="i126" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixSplitParents--">shouldFixSplitParents</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i124" class="altColor">
+<tr id="i127" class="rowColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixTableOrphans--">shouldFixTableOrphans</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i125" class="rowColor">
+<tr id="i128" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldFixVersionFile--">shouldFixVersionFile</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i126" class="altColor">
+<tr id="i129" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldIgnorePreCheckPermission--">shouldIgnorePreCheckPermission</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i127" class="rowColor">
+<tr id="i130" class="altColor">
 <td class="colFirst"><code>(package private) boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldRemoveParents--">shouldRemoveParents</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i128" class="altColor">
+<tr id="i131" class="rowColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldRerun--">shouldRerun</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i129" class="rowColor">
+<tr id="i132" class="altColor">
 <td class="colFirst"><code>boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#shouldSidelineBigOverlaps--">shouldSidelineBigOverlaps</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i130" class="altColor">
+<tr id="i133" class="rowColor">
 <td class="colFirst"><code>private boolean</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineFile-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.fs.Path-org.apache.hadoop.fs.Path-">sidelineFile</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
             org.apache.hadoop.fs.Path&nbsp;hbaseRoot,
             org.apache.hadoop.fs.Path&nbsp;path)</code>&nbsp;</td>
 </tr>
-<tr id="i131" class="rowColor">
+<tr id="i134" class="altColor">
 <td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineOldMeta--">sidelineOldMeta</a></span>()</code>&nbsp;</td>
 </tr>
-<tr id="i132" class="altColor">
+<tr id="i135" class="rowColor">
 <td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineRegionDir-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">sidelineRegionDir</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>
 <div class="block">Sideline a region dir (instead of deleting it)</div>
 </td>
 </tr>
-<tr id="i133" class="rowColor">
+<tr id="i136" class="altColor">
 <td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineRegionDir-org.apache.hadoop.fs.FileSystem-java.lang.String-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">sidelineRegionDir</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;parentDir,
@@ -1333,7 +1351,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <div class="block">Sideline a region dir (instead of deleting it)</div>
 </td>
 </tr>
-<tr id="i134" class="altColor">
+<tr id="i137" class="rowColor">
 <td class="colFirst"><code>(package private) void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#sidelineTable-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.hbase.TableName-org.apache.hadoop.fs.Path-org.apache.hadoop.fs.Path-">sidelineTable</a></span>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
              <a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;tableName,
@@ -1342,30 +1360,30 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <div class="block">Side line an entire table.</div>
 </td>
 </tr>
-<tr id="i135" class="rowColor">
+<tr id="i138" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#suggestFixes-java.util.SortedMap-">suggestFixes</a></span>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true" title="class or interface in java.util">SortedMap</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt;&nbsp;tablesInfo)</code>
 <div class="block">Suggest fixes for each table</div>
 </td>
 </tr>
-<tr id="i136" class="altColor">
+<tr id="i139" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#tryAssignmentRepair-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-java.lang.String-">tryAssignmentRepair</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi,
                    <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;msg)</code>&nbsp;</td>
 </tr>
-<tr id="i137" class="rowColor">
+<tr id="i140" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#unassignMetaReplica-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">unassignMetaReplica</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>&nbsp;</td>
 </tr>
-<tr id="i138" class="altColor">
+<tr id="i141" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#undeployRegions-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">undeployRegions</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>&nbsp;</td>
 </tr>
-<tr id="i139" class="rowColor">
+<tr id="i142" class="altColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#undeployRegionsForHbi-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">undeployRegionsForHbi</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)</code>&nbsp;</td>
 </tr>
-<tr id="i140" class="altColor">
+<tr id="i143" class="rowColor">
 <td class="colFirst"><code>private void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#unlockHbck--">unlockHbck</a></span>()</code>&nbsp;</td>
 </tr>
@@ -1404,7 +1422,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_TIME_LAG</h4>
-<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.213">DEFAULT_TIME_LAG</a></pre>
+<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.217">DEFAULT_TIME_LAG</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_TIME_LAG">Constant Field Values</a></dd>
@@ -1417,7 +1435,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_SLEEP_BEFORE_RERUN</h4>
-<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.214">DEFAULT_SLEEP_BEFORE_RERUN</a></pre>
+<pre>public static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.218">DEFAULT_SLEEP_BEFORE_RERUN</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_SLEEP_BEFORE_RERUN">Constant Field Values</a></dd>
@@ -1430,7 +1448,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>MAX_NUM_THREADS</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.215">MAX_NUM_THREADS</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.219">MAX_NUM_THREADS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.MAX_NUM_THREADS">Constant Field Values</a></dd>
@@ -1443,7 +1461,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>rsSupportsOffline</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.216">rsSupportsOffline</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.220">rsSupportsOffline</a></pre>
 </li>
 </ul>
 <a name="DEFAULT_OVERLAPS_TO_SIDELINE">
@@ -1452,7 +1470,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_OVERLAPS_TO_SIDELINE</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.217">DEFAULT_OVERLAPS_TO_SIDELINE</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.221">DEFAULT_OVERLAPS_TO_SIDELINE</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_OVERLAPS_TO_SIDELINE">Constant Field Values</a></dd>
@@ -1465,7 +1483,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_MAX_MERGE</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.218">DEFAULT_MAX_MERGE</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.222">DEFAULT_MAX_MERGE</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_MAX_MERGE">Constant Field Values</a></dd>
@@ -1478,7 +1496,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>TO_BE_LOADED</h4>
-<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.219">TO_BE_LOADED</a></pre>
+<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.223">TO_BE_LOADED</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.TO_BE_LOADED">Constant Field Values</a></dd>
@@ -1491,7 +1509,9 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>HBCK_LOCK_FILE</h4>
-<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.220">HBCK_LOCK_FILE</a></pre>
+<pre>public static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.229">HBCK_LOCK_FILE</a></pre>
+<div class="block">Here is where hbase-1.x used to default the lock for hbck1.
+ It puts in place a lock when it goes to write/make changes.</div>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.HBCK_LOCK_FILE">Constant Field Values</a></dd>
@@ -1504,7 +1524,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_MAX_LOCK_FILE_ATTEMPTS</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.221">DEFAULT_MAX_LOCK_FILE_ATTEMPTS</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.230">DEFAULT_MAX_LOCK_FILE_ATTEMPTS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_MAX_LOCK_FILE_ATTEMPTS">Constant Field Values</a></dd>
@@ -1517,7 +1537,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.222">DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.231">DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL">Constant Field Values</a></dd>
@@ -1530,7 +1550,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.223">DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.232">DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME">Constant Field Values</a></dd>
@@ -1543,7 +1563,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_WAIT_FOR_LOCK_TIMEOUT</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.228">DEFAULT_WAIT_FOR_LOCK_TIMEOUT</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.237">DEFAULT_WAIT_FOR_LOCK_TIMEOUT</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_WAIT_FOR_LOCK_TIMEOUT">Constant Field Values</a></dd>
@@ -1556,7 +1576,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.229">DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.238">DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS">Constant Field Values</a></dd>
@@ -1569,7 +1589,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.230">DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.239">DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL">Constant Field Values</a></dd>
@@ -1582,7 +1602,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.231">DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.240">DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME">Constant Field Values</a></dd>
@@ -1595,7 +1615,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>LOG</h4>
-<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.236">LOG</a></pre>
+<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.245">LOG</a></pre>
 <div class="block">Internal resources</div>
 </li>
 </ul>
@@ -1605,7 +1625,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>status</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ClusterMetrics.html" title="interface in org.apache.hadoop.hbase">ClusterMetrics</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.237">status</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ClusterMetrics.html" title="interface in org.apache.hadoop.hbase">ClusterMetrics</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.246">status</a></pre>
 </li>
 </ul>
 <a name="connection">
@@ -1614,7 +1634,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>connection</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.238">connection</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.247">connection</a></pre>
 </li>
 </ul>
 <a name="admin">
@@ -1623,7 +1643,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>admin</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Admin.html" title="interface in org.apache.hadoop.hbase.client">Admin</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.239">admin</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Admin.html" title="interface in org.apache.hadoop.hbase.client">Admin</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.248">admin</a></pre>
 </li>
 </ul>
 <a name="meta">
@@ -1632,7 +1652,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>meta</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Table.html" title="interface in org.apache.hadoop.hbase.client">Table</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.240">meta</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/Table.html" title="interface in org.apache.hadoop.hbase.client">Table</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.249">meta</a></pre>
 </li>
 </ul>
 <a name="executor">
@@ -1641,7 +1661,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>executor</h4>
-<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true" title="class or interface in java.util.concurrent">ExecutorService</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.242">executor</a></pre>
+<pre>protected&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true" title="class or interface in java.util.concurrent">ExecutorService</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.251">executor</a></pre>
 </li>
 </ul>
 <a name="startMillis">
@@ -1650,7 +1670,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>startMillis</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.243">startMillis</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.252">startMillis</a></pre>
 </li>
 </ul>
 <a name="hfcc">
@@ -1659,7 +1679,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>hfcc</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.244">hfcc</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.html" title="class in org.apache.hadoop.hbase.util.hbck">HFileCorruptionChecker</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.253">hfcc</a></pre>
 </li>
 </ul>
 <a name="retcode">
@@ -1668,7 +1688,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>retcode</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.245">retcode</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.254">retcode</a></pre>
 </li>
 </ul>
 <a name="HBCK_LOCK_PATH">
@@ -1677,7 +1697,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>HBCK_LOCK_PATH</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.246">HBCK_LOCK_PATH</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.255">HBCK_LOCK_PATH</a></pre>
 </li>
 </ul>
 <a name="hbckOutFd">
@@ -1686,7 +1706,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>hbckOutFd</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.247">hbckOutFd</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.256">hbckOutFd</a></pre>
 </li>
 </ul>
 <a name="hbckLockCleanup">
@@ -1695,7 +1715,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>hbckLockCleanup</h4>
-<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicBoolean</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.251">hbckLockCleanup</a></pre>
+<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true" title="class or interface in java.util.concurrent.atomic">AtomicBoolean</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.260">hbckLockCleanup</a></pre>
 </li>
 </ul>
 <a name="unsupportedOptionsInV2">
@@ -1704,7 +1724,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>unsupportedOptionsInV2</h4>
-<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.254">unsupportedOptionsInV2</a></pre>
+<pre>private static final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.263">unsupportedOptionsInV2</a></pre>
 </li>
 </ul>
 <a name="details">
@@ -1713,7 +1733,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>details</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.262">details</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.271">details</a></pre>
 <div class="block">Options</div>
 </li>
 </ul>
@@ -1723,7 +1743,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>timelag</h4>
-<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.263">timelag</a></pre>
+<pre>private&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.272">timelag</a></pre>
 </li>
 </ul>
 <a name="forceExclusive">
@@ -1732,7 +1752,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>forceExclusive</h4>
-<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.264">forceExclusive</a></pre>
+<pre>private static&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.273">forceExclusive</a></pre>
 </li>
 </ul>
 <a name="fixAssignments">
@@ -1741,7 +1761,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixAssignments</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.265">fixAssignments</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.274">fixAssignments</a></pre>
 </li>
 </ul>
 <a name="fixMeta">
@@ -1750,7 +1770,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>fixMeta</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.266">fixMeta</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.275">fixMeta</a></pre>
 </li>
 </ul>
 <a name="checkHdfs">
@@ -1759,7 +1779,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 <ul class="blockList">
 <li class="blockList">
 <h4>checkHdfs</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apa

<TRUNCATED>

[24/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 7df71bd..a990153 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -201,3634 +201,3643 @@
 <span class="sourceLineNo">193</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.193"></a>
 <span class="sourceLineNo">194</span>import org.apache.hadoop.hbase.util.CompressionTest;<a name="line.194"></a>
 <span class="sourceLineNo">195</span>import org.apache.hadoop.hbase.util.EncryptionTest;<a name="line.195"></a>
-<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.196"></a>
-<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.197"></a>
-<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.199"></a>
-<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.200"></a>
-<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.201"></a>
-<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.203"></a>
-<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.204"></a>
-<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.206"></a>
-<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.210"></a>
-<span class="sourceLineNo">211</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.211"></a>
-<span class="sourceLineNo">212</span>import org.apache.zookeeper.KeeperException;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>import org.eclipse.jetty.server.Server;<a name="line.213"></a>
-<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.215"></a>
-<span class="sourceLineNo">216</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>import org.slf4j.Logger;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>import org.slf4j.LoggerFactory;<a name="line.218"></a>
-<span class="sourceLineNo">219</span><a name="line.219"></a>
-<span class="sourceLineNo">220</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.222"></a>
-<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.223"></a>
-<span class="sourceLineNo">224</span><a name="line.224"></a>
-<span class="sourceLineNo">225</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.226"></a>
-<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.227"></a>
-<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.230"></a>
-<span class="sourceLineNo">231</span><a name="line.231"></a>
-<span class="sourceLineNo">232</span>/**<a name="line.232"></a>
-<span class="sourceLineNo">233</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.233"></a>
-<span class="sourceLineNo">234</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.234"></a>
-<span class="sourceLineNo">235</span> * run the cluster.  All others park themselves in their constructor until<a name="line.235"></a>
-<span class="sourceLineNo">236</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.236"></a>
-<span class="sourceLineNo">237</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.237"></a>
-<span class="sourceLineNo">238</span> *<a name="line.238"></a>
-<span class="sourceLineNo">239</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.239"></a>
-<span class="sourceLineNo">240</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.240"></a>
-<span class="sourceLineNo">241</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.241"></a>
-<span class="sourceLineNo">242</span> *<a name="line.242"></a>
-<span class="sourceLineNo">243</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.243"></a>
-<span class="sourceLineNo">244</span> *<a name="line.244"></a>
-<span class="sourceLineNo">245</span> * @see org.apache.zookeeper.Watcher<a name="line.245"></a>
-<span class="sourceLineNo">246</span> */<a name="line.246"></a>
-<span class="sourceLineNo">247</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.247"></a>
-<span class="sourceLineNo">248</span>@SuppressWarnings("deprecation")<a name="line.248"></a>
-<span class="sourceLineNo">249</span>public class HMaster extends HRegionServer implements MasterServices {<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private static Logger LOG = LoggerFactory.getLogger(HMaster.class);<a name="line.250"></a>
-<span class="sourceLineNo">251</span><a name="line.251"></a>
-<span class="sourceLineNo">252</span>  /**<a name="line.252"></a>
-<span class="sourceLineNo">253</span>   * Protection against zombie master. Started once Master accepts active responsibility and<a name="line.253"></a>
-<span class="sourceLineNo">254</span>   * starts taking over responsibilities. Allows a finite time window before giving up ownership.<a name="line.254"></a>
-<span class="sourceLineNo">255</span>   */<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private static class InitializationMonitor extends HasThread {<a name="line.256"></a>
-<span class="sourceLineNo">257</span>    /** The amount of time in milliseconds to sleep before checking initialization status. */<a name="line.257"></a>
-<span class="sourceLineNo">258</span>    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";<a name="line.258"></a>
-<span class="sourceLineNo">259</span>    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);<a name="line.259"></a>
-<span class="sourceLineNo">260</span><a name="line.260"></a>
-<span class="sourceLineNo">261</span>    /**<a name="line.261"></a>
-<span class="sourceLineNo">262</span>     * When timeout expired and initialization has not complete, call {@link System#exit(int)} when<a name="line.262"></a>
-<span class="sourceLineNo">263</span>     * true, do nothing otherwise.<a name="line.263"></a>
-<span class="sourceLineNo">264</span>     */<a name="line.264"></a>
-<span class="sourceLineNo">265</span>    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";<a name="line.265"></a>
-<span class="sourceLineNo">266</span>    public static final boolean HALT_DEFAULT = false;<a name="line.266"></a>
-<span class="sourceLineNo">267</span><a name="line.267"></a>
-<span class="sourceLineNo">268</span>    private final HMaster master;<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    private final long timeout;<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    private final boolean haltOnTimeout;<a name="line.270"></a>
-<span class="sourceLineNo">271</span><a name="line.271"></a>
-<span class="sourceLineNo">272</span>    /** Creates a Thread that monitors the {@link #isInitialized()} state. */<a name="line.272"></a>
-<span class="sourceLineNo">273</span>    InitializationMonitor(HMaster master) {<a name="line.273"></a>
-<span class="sourceLineNo">274</span>      super("MasterInitializationMonitor");<a name="line.274"></a>
-<span class="sourceLineNo">275</span>      this.master = master;<a name="line.275"></a>
-<span class="sourceLineNo">276</span>      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);<a name="line.276"></a>
-<span class="sourceLineNo">277</span>      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);<a name="line.277"></a>
-<span class="sourceLineNo">278</span>      this.setDaemon(true);<a name="line.278"></a>
-<span class="sourceLineNo">279</span>    }<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>    @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>    public void run() {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>      try {<a name="line.283"></a>
-<span class="sourceLineNo">284</span>        while (!master.isStopped() &amp;&amp; master.isActiveMaster()) {<a name="line.284"></a>
-<span class="sourceLineNo">285</span>          Thread.sleep(timeout);<a name="line.285"></a>
-<span class="sourceLineNo">286</span>          if (master.isInitialized()) {<a name="line.286"></a>
-<span class="sourceLineNo">287</span>            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");<a name="line.287"></a>
-<span class="sourceLineNo">288</span>          } else {<a name="line.288"></a>
-<span class="sourceLineNo">289</span>            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"<a name="line.289"></a>
-<span class="sourceLineNo">290</span>                + " consider submitting a bug report including a thread dump of this process.");<a name="line.290"></a>
-<span class="sourceLineNo">291</span>            if (haltOnTimeout) {<a name="line.291"></a>
-<span class="sourceLineNo">292</span>              LOG.error("Zombie Master exiting. Thread dump to stdout");<a name="line.292"></a>
-<span class="sourceLineNo">293</span>              Threads.printThreadInfo(System.out, "Zombie HMaster");<a name="line.293"></a>
-<span class="sourceLineNo">294</span>              System.exit(-1);<a name="line.294"></a>
-<span class="sourceLineNo">295</span>            }<a name="line.295"></a>
-<span class="sourceLineNo">296</span>          }<a name="line.296"></a>
-<span class="sourceLineNo">297</span>        }<a name="line.297"></a>
-<span class="sourceLineNo">298</span>      } catch (InterruptedException ie) {<a name="line.298"></a>
-<span class="sourceLineNo">299</span>        LOG.trace("InitMonitor thread interrupted. Existing.");<a name="line.299"></a>
-<span class="sourceLineNo">300</span>      }<a name="line.300"></a>
-<span class="sourceLineNo">301</span>    }<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  }<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  // MASTER is name of the webapp and the attribute name used stuffing this<a name="line.304"></a>
-<span class="sourceLineNo">305</span>  //instance into web context.<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  public static final String MASTER = "master";<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  // Manager and zk listener for master election<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private final ActiveMasterManager activeMasterManager;<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Region server tracker<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private RegionServerTracker regionServerTracker;<a name="line.311"></a>
-<span class="sourceLineNo">312</span>  // Draining region server tracker<a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private DrainingServerTracker drainingServerTracker;<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  // Tracker for load balancer state<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  LoadBalancerTracker loadBalancerTracker;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  // Tracker for meta location, if any client ZK quorum specified<a name="line.316"></a>
-<span class="sourceLineNo">317</span>  MetaLocationSyncer metaLocationSyncer;<a name="line.317"></a>
-<span class="sourceLineNo">318</span>  // Tracker for active master location, if any client ZK quorum specified<a name="line.318"></a>
-<span class="sourceLineNo">319</span>  MasterAddressSyncer masterAddressSyncer;<a name="line.319"></a>
-<span class="sourceLineNo">320</span><a name="line.320"></a>
-<span class="sourceLineNo">321</span>  // Tracker for split and merge state<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private SplitOrMergeTracker splitOrMergeTracker;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  // Tracker for region normalizer state<a name="line.324"></a>
-<span class="sourceLineNo">325</span>  private RegionNormalizerTracker regionNormalizerTracker;<a name="line.325"></a>
-<span class="sourceLineNo">326</span><a name="line.326"></a>
-<span class="sourceLineNo">327</span>  //Tracker for master maintenance mode setting<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private MasterMaintenanceModeTracker maintenanceModeTracker;<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private ClusterSchemaService clusterSchemaService;<a name="line.330"></a>
-<span class="sourceLineNo">331</span><a name="line.331"></a>
-<span class="sourceLineNo">332</span>  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    "hbase.master.wait.on.service.seconds";<a name="line.333"></a>
-<span class="sourceLineNo">334</span>  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;<a name="line.334"></a>
-<span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  // Metrics for the HMaster<a name="line.336"></a>
-<span class="sourceLineNo">337</span>  final MetricsMaster metricsMaster;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  // file system manager for the master FS operations<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private MasterFileSystem fileSystemManager;<a name="line.339"></a>
-<span class="sourceLineNo">340</span>  private MasterWalManager walManager;<a name="line.340"></a>
-<span class="sourceLineNo">341</span><a name="line.341"></a>
-<span class="sourceLineNo">342</span>  // server manager to deal with region server info<a name="line.342"></a>
-<span class="sourceLineNo">343</span>  private volatile ServerManager serverManager;<a name="line.343"></a>
-<span class="sourceLineNo">344</span><a name="line.344"></a>
-<span class="sourceLineNo">345</span>  // manager of assignment nodes in zookeeper<a name="line.345"></a>
-<span class="sourceLineNo">346</span>  private AssignmentManager assignmentManager;<a name="line.346"></a>
-<span class="sourceLineNo">347</span><a name="line.347"></a>
-<span class="sourceLineNo">348</span>  // manager of replication<a name="line.348"></a>
-<span class="sourceLineNo">349</span>  private ReplicationPeerManager replicationPeerManager;<a name="line.349"></a>
-<span class="sourceLineNo">350</span><a name="line.350"></a>
-<span class="sourceLineNo">351</span>  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;<a name="line.351"></a>
-<span class="sourceLineNo">352</span><a name="line.352"></a>
-<span class="sourceLineNo">353</span>  // buffer for "fatal error" notices from region servers<a name="line.353"></a>
-<span class="sourceLineNo">354</span>  // in the cluster. This is only used for assisting<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  // operations/debugging.<a name="line.355"></a>
-<span class="sourceLineNo">356</span>  MemoryBoundedLogMessageBuffer rsFatals;<a name="line.356"></a>
-<span class="sourceLineNo">357</span><a name="line.357"></a>
-<span class="sourceLineNo">358</span>  // flag set after we become the active master (used for testing)<a name="line.358"></a>
-<span class="sourceLineNo">359</span>  private volatile boolean activeMaster = false;<a name="line.359"></a>
-<span class="sourceLineNo">360</span><a name="line.360"></a>
-<span class="sourceLineNo">361</span>  // flag set after we complete initialization once active<a name="line.361"></a>
-<span class="sourceLineNo">362</span>  private final ProcedureEvent&lt;?&gt; initialized = new ProcedureEvent&lt;&gt;("master initialized");<a name="line.362"></a>
-<span class="sourceLineNo">363</span><a name="line.363"></a>
-<span class="sourceLineNo">364</span>  // flag set after master services are started,<a name="line.364"></a>
-<span class="sourceLineNo">365</span>  // initialization may have not completed yet.<a name="line.365"></a>
-<span class="sourceLineNo">366</span>  volatile boolean serviceStarted = false;<a name="line.366"></a>
-<span class="sourceLineNo">367</span><a name="line.367"></a>
-<span class="sourceLineNo">368</span>  // Maximum time we should run balancer for<a name="line.368"></a>
-<span class="sourceLineNo">369</span>  private final int maxBlancingTime;<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  // Maximum percent of regions in transition when balancing<a name="line.370"></a>
-<span class="sourceLineNo">371</span>  private final double maxRitPercent;<a name="line.371"></a>
-<span class="sourceLineNo">372</span><a name="line.372"></a>
-<span class="sourceLineNo">373</span>  private final LockManager lockManager = new LockManager(this);<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>  private LoadBalancer balancer;<a name="line.375"></a>
-<span class="sourceLineNo">376</span>  private RegionNormalizer normalizer;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>  private BalancerChore balancerChore;<a name="line.377"></a>
-<span class="sourceLineNo">378</span>  private RegionNormalizerChore normalizerChore;<a name="line.378"></a>
-<span class="sourceLineNo">379</span>  private ClusterStatusChore clusterStatusChore;<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  private ClusterStatusPublisher clusterStatusPublisherChore = null;<a name="line.380"></a>
-<span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  CatalogJanitor catalogJanitorChore;<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  private LogCleaner logCleaner;<a name="line.383"></a>
-<span class="sourceLineNo">384</span>  private HFileCleaner hfileCleaner;<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  private ReplicationBarrierCleaner replicationBarrierCleaner;<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  private MobCompactionChore mobCompactChore;<a name="line.387"></a>
-<span class="sourceLineNo">388</span>  private MasterMobCompactionThread mobCompactThread;<a name="line.388"></a>
-<span class="sourceLineNo">389</span>  // used to synchronize the mobCompactionStates<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  private final IdLock mobCompactionLock = new IdLock();<a name="line.390"></a>
-<span class="sourceLineNo">391</span>  // save the information of mob compactions in tables.<a name="line.391"></a>
-<span class="sourceLineNo">392</span>  // the key is table name, the value is the number of compactions in that table.<a name="line.392"></a>
-<span class="sourceLineNo">393</span>  private Map&lt;TableName, AtomicInteger&gt; mobCompactionStates = Maps.newConcurrentMap();<a name="line.393"></a>
-<span class="sourceLineNo">394</span><a name="line.394"></a>
-<span class="sourceLineNo">395</span>  MasterCoprocessorHost cpHost;<a name="line.395"></a>
-<span class="sourceLineNo">396</span><a name="line.396"></a>
-<span class="sourceLineNo">397</span>  private final boolean preLoadTableDescriptors;<a name="line.397"></a>
-<span class="sourceLineNo">398</span><a name="line.398"></a>
-<span class="sourceLineNo">399</span>  // Time stamps for when a hmaster became active<a name="line.399"></a>
-<span class="sourceLineNo">400</span>  private long masterActiveTime;<a name="line.400"></a>
-<span class="sourceLineNo">401</span><a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // Time stamp for when HMaster finishes becoming Active Master<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  private long masterFinishedInitializationTime;<a name="line.403"></a>
-<span class="sourceLineNo">404</span><a name="line.404"></a>
-<span class="sourceLineNo">405</span>  //should we check the compression codec type at master side, default true, HBASE-6370<a name="line.405"></a>
-<span class="sourceLineNo">406</span>  private final boolean masterCheckCompression;<a name="line.406"></a>
-<span class="sourceLineNo">407</span><a name="line.407"></a>
-<span class="sourceLineNo">408</span>  //should we check encryption settings at master side, default true<a name="line.408"></a>
-<span class="sourceLineNo">409</span>  private final boolean masterCheckEncryption;<a name="line.409"></a>
-<span class="sourceLineNo">410</span><a name="line.410"></a>
-<span class="sourceLineNo">411</span>  Map&lt;String, Service&gt; coprocessorServiceHandlers = Maps.newHashMap();<a name="line.411"></a>
-<span class="sourceLineNo">412</span><a name="line.412"></a>
-<span class="sourceLineNo">413</span>  // monitor for snapshot of hbase tables<a name="line.413"></a>
-<span class="sourceLineNo">414</span>  SnapshotManager snapshotManager;<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  // monitor for distributed procedures<a name="line.415"></a>
-<span class="sourceLineNo">416</span>  private MasterProcedureManagerHost mpmHost;<a name="line.416"></a>
-<span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  // it is assigned after 'initialized' guard set to true, so should be volatile<a name="line.418"></a>
-<span class="sourceLineNo">419</span>  private volatile MasterQuotaManager quotaManager;<a name="line.419"></a>
-<span class="sourceLineNo">420</span>  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  private QuotaObserverChore quotaObserverChore;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  private SnapshotQuotaObserverChore snapshotQuotaChore;<a name="line.422"></a>
-<span class="sourceLineNo">423</span><a name="line.423"></a>
-<span class="sourceLineNo">424</span>  private ProcedureExecutor&lt;MasterProcedureEnv&gt; procedureExecutor;<a name="line.424"></a>
-<span class="sourceLineNo">425</span>  private WALProcedureStore procedureStore;<a name="line.425"></a>
-<span class="sourceLineNo">426</span><a name="line.426"></a>
-<span class="sourceLineNo">427</span>  // handle table states<a name="line.427"></a>
-<span class="sourceLineNo">428</span>  private TableStateManager tableStateManager;<a name="line.428"></a>
-<span class="sourceLineNo">429</span><a name="line.429"></a>
-<span class="sourceLineNo">430</span>  private long splitPlanCount;<a name="line.430"></a>
-<span class="sourceLineNo">431</span>  private long mergePlanCount;<a name="line.431"></a>
-<span class="sourceLineNo">432</span><a name="line.432"></a>
-<span class="sourceLineNo">433</span>  /* Handle favored nodes information */<a name="line.433"></a>
-<span class="sourceLineNo">434</span>  private FavoredNodesManager favoredNodesManager;<a name="line.434"></a>
-<span class="sourceLineNo">435</span><a name="line.435"></a>
-<span class="sourceLineNo">436</span>  /** jetty server for master to redirect requests to regionserver infoServer */<a name="line.436"></a>
-<span class="sourceLineNo">437</span>  private Server masterJettyServer;<a name="line.437"></a>
-<span class="sourceLineNo">438</span><a name="line.438"></a>
-<span class="sourceLineNo">439</span>  public static class RedirectServlet extends HttpServlet {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    private static final long serialVersionUID = 2894774810058302473L;<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    private final int regionServerInfoPort;<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    private final String regionServerHostname;<a name="line.442"></a>
-<span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>    /**<a name="line.444"></a>
-<span class="sourceLineNo">445</span>     * @param infoServer that we're trying to send all requests to<a name="line.445"></a>
-<span class="sourceLineNo">446</span>     * @param hostname may be null. if given, will be used for redirects instead of host from client.<a name="line.446"></a>
-<span class="sourceLineNo">447</span>     */<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    public RedirectServlet(InfoServer infoServer, String hostname) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>       regionServerInfoPort = infoServer.getPort();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>       regionServerHostname = hostname;<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    }<a name="line.451"></a>
-<span class="sourceLineNo">452</span><a name="line.452"></a>
-<span class="sourceLineNo">453</span>    @Override<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    public void doGet(HttpServletRequest request,<a name="line.454"></a>
-<span class="sourceLineNo">455</span>        HttpServletResponse response) throws ServletException, IOException {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      String redirectHost = regionServerHostname;<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      if(redirectHost == null) {<a name="line.457"></a>
-<span class="sourceLineNo">458</span>        redirectHost = request.getServerName();<a name="line.458"></a>
-<span class="sourceLineNo">459</span>        if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {<a name="line.459"></a>
-<span class="sourceLineNo">460</span>          LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +<a name="line.460"></a>
-<span class="sourceLineNo">461</span>              MASTER_HOSTNAME_KEY + "' is not set; client will get a HTTP 400 response. If " +<a name="line.461"></a>
-<span class="sourceLineNo">462</span>              "your HBase deployment relies on client accessible names that the region server process " +<a name="line.462"></a>
-<span class="sourceLineNo">463</span>              "can't resolve locally, then you should set the previously mentioned configuration variable " +<a name="line.463"></a>
-<span class="sourceLineNo">464</span>              "to an appropriate hostname.");<a name="line.464"></a>
-<span class="sourceLineNo">465</span>          // no sending client provided input back to the client, so the goal host is just in the logs.<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +<a name="line.466"></a>
-<span class="sourceLineNo">467</span>              "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          return;<a name="line.469"></a>
-<span class="sourceLineNo">470</span>        }<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      }<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      // host and port we're using, but it's buried way too deep to do that ATM.<a name="line.473"></a>
-<span class="sourceLineNo">474</span>      String redirectUrl = request.getScheme() + "://"<a name="line.474"></a>
-<span class="sourceLineNo">475</span>        + redirectHost + ":" + regionServerInfoPort<a name="line.475"></a>
-<span class="sourceLineNo">476</span>        + request.getRequestURI();<a name="line.476"></a>
-<span class="sourceLineNo">477</span>      response.sendRedirect(redirectUrl);<a name="line.477"></a>
-<span class="sourceLineNo">478</span>    }<a name="line.478"></a>
-<span class="sourceLineNo">479</span>  }<a name="line.479"></a>
-<span class="sourceLineNo">480</span><a name="line.480"></a>
-<span class="sourceLineNo">481</span>  /**<a name="line.481"></a>
-<span class="sourceLineNo">482</span>   * Initializes the HMaster. The steps are as follows:<a name="line.482"></a>
-<span class="sourceLineNo">483</span>   * &lt;p&gt;<a name="line.483"></a>
-<span class="sourceLineNo">484</span>   * &lt;ol&gt;<a name="line.484"></a>
-<span class="sourceLineNo">485</span>   * &lt;li&gt;Initialize the local HRegionServer<a name="line.485"></a>
-<span class="sourceLineNo">486</span>   * &lt;li&gt;Start the ActiveMasterManager.<a name="line.486"></a>
-<span class="sourceLineNo">487</span>   * &lt;/ol&gt;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>   * &lt;p&gt;<a name="line.488"></a>
-<span class="sourceLineNo">489</span>   * Remaining steps of initialization occur in<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * #finishActiveMasterInitialization(MonitoredTask) after<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * the master becomes the active one.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public HMaster(final Configuration conf)<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      throws IOException, KeeperException {<a name="line.494"></a>
-<span class="sourceLineNo">495</span>    super(conf);<a name="line.495"></a>
-<span class="sourceLineNo">496</span>    TraceUtil.initTracer(conf);<a name="line.496"></a>
-<span class="sourceLineNo">497</span>    try {<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      this.rsFatals = new MemoryBoundedLogMessageBuffer(<a name="line.498"></a>
-<span class="sourceLineNo">499</span>          conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      LOG.info("hbase.rootdir=" + getRootDir() +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>          ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));<a name="line.501"></a>
-<span class="sourceLineNo">502</span><a name="line.502"></a>
-<span class="sourceLineNo">503</span>      // Disable usage of meta replicas in the master<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      decorateMasterConfiguration(this.conf);<a name="line.506"></a>
-<span class="sourceLineNo">507</span><a name="line.507"></a>
-<span class="sourceLineNo">508</span>      // Hack! Maps DFSClient =&gt; Master for logs.  HDFS made this<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      // config param for task trackers, but we can piggyback off of it.<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      if (this.conf.get("mapreduce.task.attempt.id") == null) {<a name="line.510"></a>
-<span class="sourceLineNo">511</span>        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      }<a name="line.512"></a>
-<span class="sourceLineNo">513</span><a name="line.513"></a>
-<span class="sourceLineNo">514</span>      // should we check the compression codec type at master side, default true, HBASE-6370<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);<a name="line.515"></a>
-<span class="sourceLineNo">516</span><a name="line.516"></a>
-<span class="sourceLineNo">517</span>      // should we check encryption settings at master side, default true<a name="line.517"></a>
-<span class="sourceLineNo">518</span>      this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);<a name="line.518"></a>
-<span class="sourceLineNo">519</span><a name="line.519"></a>
-<span class="sourceLineNo">520</span>      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));<a name="line.520"></a>
-<span class="sourceLineNo">521</span><a name="line.521"></a>
-<span class="sourceLineNo">522</span>      // preload table descriptor at startup<a name="line.522"></a>
-<span class="sourceLineNo">523</span>      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>      this.maxBlancingTime = getMaxBalancingTime();<a name="line.525"></a>
-<span class="sourceLineNo">526</span>      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,<a name="line.526"></a>
-<span class="sourceLineNo">527</span>          HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);<a name="line.527"></a>
-<span class="sourceLineNo">528</span><a name="line.528"></a>
-<span class="sourceLineNo">529</span>      // Do we publish the status?<a name="line.529"></a>
-<span class="sourceLineNo">530</span><a name="line.530"></a>
-<span class="sourceLineNo">531</span>      boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,<a name="line.531"></a>
-<span class="sourceLineNo">532</span>          HConstants.STATUS_PUBLISHED_DEFAULT);<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      Class&lt;? extends ClusterStatusPublisher.Publisher&gt; publisherClass =<a name="line.533"></a>
-<span class="sourceLineNo">534</span>          conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,<a name="line.534"></a>
-<span class="sourceLineNo">535</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,<a name="line.535"></a>
-<span class="sourceLineNo">536</span>              ClusterStatusPublisher.Publisher.class);<a name="line.536"></a>
-<span class="sourceLineNo">537</span><a name="line.537"></a>
-<span class="sourceLineNo">538</span>      if (shouldPublish) {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        if (publisherClass == null) {<a name="line.539"></a>
-<span class="sourceLineNo">540</span>          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +<a name="line.540"></a>
-<span class="sourceLineNo">541</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +<a name="line.541"></a>
-<span class="sourceLineNo">542</span>              " is not set - not publishing status");<a name="line.542"></a>
-<span class="sourceLineNo">543</span>        } else {<a name="line.543"></a>
-<span class="sourceLineNo">544</span>          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>          getChoreService().scheduleChore(clusterStatusPublisherChore);<a name="line.545"></a>
-<span class="sourceLineNo">546</span>        }<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      }<a name="line.547"></a>
-<span class="sourceLineNo">548</span><a name="line.548"></a>
-<span class="sourceLineNo">549</span>      // Some unit tests don't need a cluster, so no zookeeper at all<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      } else {<a name="line.552"></a>
-<span class="sourceLineNo">553</span>        this.activeMasterManager = null;<a name="line.553"></a>
-<span class="sourceLineNo">554</span>      }<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    } catch (Throwable t) {<a name="line.555"></a>
-<span class="sourceLineNo">556</span>      // Make sure we log the exception. HMaster is often started via reflection and the<a name="line.556"></a>
-<span class="sourceLineNo">557</span>      // cause of failed startup is lost.<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      LOG.error("Failed construction of Master", t);<a name="line.558"></a>
-<span class="sourceLineNo">559</span>      throw t;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>  }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>  @Override<a name="line.563"></a>
-<span class="sourceLineNo">564</span>  protected String getUseThisHostnameInstead(Configuration conf) {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    return conf.get(MASTER_HOSTNAME_KEY);<a name="line.565"></a>
-<span class="sourceLineNo">566</span>  }<a name="line.566"></a>
-<span class="sourceLineNo">567</span><a name="line.567"></a>
-<span class="sourceLineNo">568</span>  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  // block in here until then.<a name="line.569"></a>
-<span class="sourceLineNo">570</span>  @Override<a name="line.570"></a>
-<span class="sourceLineNo">571</span>  public void run() {<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    try {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>        Threads.setDaemonThreadRunning(new Thread(() -&gt; {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>          try {<a name="line.575"></a>
-<span class="sourceLineNo">576</span>            int infoPort = putUpJettyServer();<a name="line.576"></a>
-<span class="sourceLineNo">577</span>            startActiveMasterManager(infoPort);<a name="line.577"></a>
-<span class="sourceLineNo">578</span>          } catch (Throwable t) {<a name="line.578"></a>
-<span class="sourceLineNo">579</span>            // Make sure we log the exception.<a name="line.579"></a>
-<span class="sourceLineNo">580</span>            String error = "Failed to become Active Master";<a name="line.580"></a>
-<span class="sourceLineNo">581</span>            LOG.error(error, t);<a name="line.581"></a>
-<span class="sourceLineNo">582</span>            // Abort should have been called already.<a name="line.582"></a>
-<span class="sourceLineNo">583</span>            if (!isAborted()) {<a name="line.583"></a>
-<span class="sourceLineNo">584</span>              abort(error, t);<a name="line.584"></a>
-<span class="sourceLineNo">585</span>            }<a name="line.585"></a>
-<span class="sourceLineNo">586</span>          }<a name="line.586"></a>
-<span class="sourceLineNo">587</span>        }));<a name="line.587"></a>
-<span class="sourceLineNo">588</span>      }<a name="line.588"></a>
-<span class="sourceLineNo">589</span>      // Fall in here even if we have been aborted. Need to run the shutdown services and<a name="line.589"></a>
-<span class="sourceLineNo">590</span>      // the super run call will do this for us.<a name="line.590"></a>
-<span class="sourceLineNo">591</span>      super.run();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    } finally {<a name="line.592"></a>
-<span class="sourceLineNo">593</span>      if (this.clusterSchemaService != null) {<a name="line.593"></a>
-<span class="sourceLineNo">594</span>        // If on way out, then we are no longer active master.<a name="line.594"></a>
-<span class="sourceLineNo">595</span>        this.clusterSchemaService.stopAsync();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>        try {<a name="line.596"></a>
-<span class="sourceLineNo">597</span>          this.clusterSchemaService.awaitTerminated(<a name="line.597"></a>
-<span class="sourceLineNo">598</span>              getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,<a name="line.598"></a>
-<span class="sourceLineNo">599</span>              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);<a name="line.599"></a>
-<span class="sourceLineNo">600</span>        } catch (TimeoutException te) {<a name="line.600"></a>
-<span class="sourceLineNo">601</span>          LOG.warn("Failed shutdown of clusterSchemaService", te);<a name="line.601"></a>
-<span class="sourceLineNo">602</span>        }<a name="line.602"></a>
-<span class="sourceLineNo">603</span>      }<a name="line.603"></a>
-<span class="sourceLineNo">604</span>      this.activeMaster = false;<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    }<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  }<a name="line.606"></a>
-<span class="sourceLineNo">607</span><a name="line.607"></a>
-<span class="sourceLineNo">608</span>  // return the actual infoPort, -1 means disable info server.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>  private int putUpJettyServer() throws IOException {<a name="line.609"></a>
-<span class="sourceLineNo">610</span>    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      return -1;<a name="line.611"></a>
-<span class="sourceLineNo">612</span>    }<a name="line.612"></a>
-<span class="sourceLineNo">613</span>    final int infoPort = conf.getInt("hbase.master.info.port.orig",<a name="line.613"></a>
-<span class="sourceLineNo">614</span>      HConstants.DEFAULT_MASTER_INFOPORT);<a name="line.614"></a>
-<span class="sourceLineNo">615</span>    // -1 is for disabling info server, so no redirecting<a name="line.615"></a>
-<span class="sourceLineNo">616</span>    if (infoPort &lt; 0 || infoServer == null) {<a name="line.616"></a>
-<span class="sourceLineNo">617</span>      return -1;<a name="line.617"></a>
-<span class="sourceLineNo">618</span>    }<a name="line.618"></a>
-<span class="sourceLineNo">619</span>    if(infoPort == infoServer.getPort()) {<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      return infoPort;<a name="line.620"></a>
-<span class="sourceLineNo">621</span>    }<a name="line.621"></a>
-<span class="sourceLineNo">622</span>    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");<a name="line.622"></a>
-<span class="sourceLineNo">623</span>    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {<a name="line.623"></a>
-<span class="sourceLineNo">624</span>      String msg =<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          "Failed to start redirecting jetty server. Address " + addr<a name="line.625"></a>
-<span class="sourceLineNo">626</span>              + " does not belong to this host. Correct configuration parameter: "<a name="line.626"></a>
-<span class="sourceLineNo">627</span>              + "hbase.master.info.bindAddress";<a name="line.627"></a>
-<span class="sourceLineNo">628</span>      LOG.error(msg);<a name="line.628"></a>
-<span class="sourceLineNo">629</span>      throw new IOException(msg);<a name="line.629"></a>
-<span class="sourceLineNo">630</span>    }<a name="line.630"></a>
-<span class="sourceLineNo">631</span><a name="line.631"></a>
-<span class="sourceLineNo">632</span>    // TODO I'm pretty sure we could just add another binding to the InfoServer run by<a name="line.632"></a>
-<span class="sourceLineNo">633</span>    // the RegionServer and have it run the RedirectServlet instead of standing up<a name="line.633"></a>
-<span class="sourceLineNo">634</span>    // a second entire stack here.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>    masterJettyServer = new Server();<a name="line.635"></a>
-<span class="sourceLineNo">636</span>    final ServerConnector connector = new ServerConnector(masterJettyServer);<a name="line.636"></a>
-<span class="sourceLineNo">637</span>    connector.setHost(addr);<a name="line.637"></a>
-<span class="sourceLineNo">638</span>    connector.setPort(infoPort);<a name="line.638"></a>
-<span class="sourceLineNo">639</span>    masterJettyServer.addConnector(connector);<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    masterJettyServer.setStopAtShutdown(true);<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    final String redirectHostname =<a name="line.642"></a>
-<span class="sourceLineNo">643</span>        StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;<a name="line.643"></a>
-<span class="sourceLineNo">644</span><a name="line.644"></a>
-<span class="sourceLineNo">645</span>    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    context.addServlet(new ServletHolder(redirect), "/*");<a name="line.647"></a>
-<span class="sourceLineNo">648</span>    context.setServer(masterJettyServer);<a name="line.648"></a>
-<span class="sourceLineNo">649</span><a name="line.649"></a>
-<span class="sourceLineNo">650</span>    try {<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      masterJettyServer.start();<a name="line.651"></a>
-<span class="sourceLineNo">652</span>    } catch (Exception e) {<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      throw new IOException("Failed to start redirecting jetty server", e);<a name="line.653"></a>
-<span class="sourceLineNo">654</span>    }<a name="line.654"></a>
-<span class="sourceLineNo">655</span>    return connector.getLocalPort();<a name="line.655"></a>
-<span class="sourceLineNo">656</span>  }<a name="line.656"></a>
-<span class="sourceLineNo">657</span><a name="line.657"></a>
-<span class="sourceLineNo">658</span>  @Override<a name="line.658"></a>
-<span class="sourceLineNo">659</span>  protected Function&lt;TableDescriptorBuilder, TableDescriptorBuilder&gt; getMetaTableObserver() {<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    return builder -&gt; builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));<a name="line.660"></a>
-<span class="sourceLineNo">661</span>  }<a name="line.661"></a>
-<span class="sourceLineNo">662</span>  /**<a name="line.662"></a>
-<span class="sourceLineNo">663</span>   * For compatibility, if failed with regionserver credentials, try the master one<a name="line.663"></a>
-<span class="sourceLineNo">664</span>   */<a name="line.664"></a>
-<span class="sourceLineNo">665</span>  @Override<a name="line.665"></a>
-<span class="sourceLineNo">666</span>  protected void login(UserProvider user, String host) throws IOException {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>    try {<a name="line.667"></a>
-<span class="sourceLineNo">668</span>      super.login(user, host);<a name="line.668"></a>
-<span class="sourceLineNo">669</span>    } catch (IOException ie) {<a name="line.669"></a>
-<span class="sourceLineNo">670</span>      user.login("hbase.master.keytab.file",<a name="line.670"></a>
-<span class="sourceLineNo">671</span>        "hbase.master.kerberos.principal", host);<a name="line.671"></a>
-<span class="sourceLineNo">672</span>    }<a name="line.672"></a>
-<span class="sourceLineNo">673</span>  }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>  /**<a name="line.675"></a>
-<span class="sourceLineNo">676</span>   * If configured to put regions on active master,<a name="line.676"></a>
-<span class="sourceLineNo">677</span>   * wait till a backup master becomes active.<a name="line.677"></a>
-<span class="sourceLineNo">678</span>   * Otherwise, loop till the server is stopped or aborted.<a name="line.678"></a>
-<span class="sourceLineNo">679</span>   */<a name="line.679"></a>
-<span class="sourceLineNo">680</span>  @Override<a name="line.680"></a>
-<span class="sourceLineNo">681</span>  protected void waitForMasterActive(){<a name="line.681"></a>
-<span class="sourceLineNo">682</span>    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);<a name="line.682"></a>
-<span class="sourceLineNo">683</span>    while (!(tablesOnMaster &amp;&amp; activeMaster) &amp;&amp; !isStopped() &amp;&amp; !isAborted()) {<a name="line.683"></a>
-<span class="sourceLineNo">684</span>      sleeper.sleep();<a name="line.684"></a>
-<span class="sourceLineNo">685</span>    }<a name="line.685"></a>
-<span class="sourceLineNo">686</span>  }<a name="line.686"></a>
-<span class="sourceLineNo">687</span><a name="line.687"></a>
-<span class="sourceLineNo">688</span>  @VisibleForTesting<a name="line.688"></a>
-<span class="sourceLineNo">689</span>  public MasterRpcServices getMasterRpcServices() {<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    return (MasterRpcServices)rpcServices;<a name="line.690"></a>
-<span class="sourceLineNo">691</span>  }<a name="line.691"></a>
-<span class="sourceLineNo">692</span><a name="line.692"></a>
-<span class="sourceLineNo">693</span>  public boolean balanceSwitch(final boolean b) throws IOException {<a name="line.693"></a>
-<span class="sourceLineNo">694</span>    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);<a name="line.694"></a>
-<span class="sourceLineNo">695</span>  }<a name="line.695"></a>
-<span class="sourceLineNo">696</span><a name="line.696"></a>
-<span class="sourceLineNo">697</span>  @Override<a name="line.697"></a>
-<span class="sourceLineNo">698</span>  protected String getProcessName() {<a name="line.698"></a>
-<span class="sourceLineNo">699</span>    return MASTER;<a name="line.699"></a>
-<span class="sourceLineNo">700</span>  }<a name="line.700"></a>
-<span class="sourceLineNo">701</span><a name="line.701"></a>
-<span class="sourceLineNo">702</span>  @Override<a name="line.702"></a>
-<span class="sourceLineNo">703</span>  protected boolean canCreateBaseZNode() {<a name="line.703"></a>
-<span class="sourceLineNo">704</span>    return true;<a name="line.704"></a>
-<span class="sourceLineNo">705</span>  }<a name="line.705"></a>
-<span class="sourceLineNo">706</span><a name="line.706"></a>
-<span class="sourceLineNo">707</span>  @Override<a name="line.707"></a>
-<span class="sourceLineNo">708</span>  protected boolean canUpdateTableDescriptor() {<a name="line.708"></a>
-<span class="sourceLineNo">709</span>    return true;<a name="line.709"></a>
-<span class="sourceLineNo">710</span>  }<a name="line.710"></a>
-<span class="sourceLineNo">711</span><a name="line.711"></a>
-<span class="sourceLineNo">712</span>  @Override<a name="line.712"></a>
-<span class="sourceLineNo">713</span>  protected RSRpcServices createRpcServices() throws IOException {<a name="line.713"></a>
-<span class="sourceLineNo">714</span>    return new MasterRpcServices(this);<a name="line.714"></a>
-<span class="sourceLineNo">715</span>  }<a name="line.715"></a>
-<span class="sourceLineNo">716</span><a name="line.716"></a>
-<span class="sourceLineNo">717</span>  @Override<a name="line.717"></a>
-<span class="sourceLineNo">718</span>  protected void configureInfoServer() {<a name="line.718"></a>
-<span class="sourceLineNo">719</span>    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);<a name="line.719"></a>
-<span class="sourceLineNo">720</span>    infoServer.setAttribute(MASTER, this);<a name="line.720"></a>
-<span class="sourceLineNo">721</span>    if (LoadBalancer.isTablesOnMaster(conf)) {<a name="line.721"></a>
-<span class="sourceLineNo">722</span>      super.configureInfoServer();<a name="line.722"></a>
-<span class="sourceLineNo">723</span>    }<a name="line.723"></a>
-<span class="sourceLineNo">724</span>  }<a name="line.724"></a>
-<span class="sourceLineNo">725</span><a name="line.725"></a>
-<span class="sourceLineNo">726</span>  @Override<a name="line.726"></a>
-<span class="sourceLineNo">727</span>  protected Class&lt;? extends HttpServlet&gt; getDumpServlet() {<a name="line.727"></a>
-<span class="sourceLineNo">728</span>    return MasterDumpServlet.class;<a name="line.728"></a>
-<span class="sourceLineNo">729</span>  }<a name="line.729"></a>
-<span class="sourceLineNo">730</span><a name="line.730"></a>
-<span class="sourceLineNo">731</span>  @Override<a name="line.731"></a>
-<span class="sourceLineNo">732</span>  public MetricsMaster getMasterMetrics() {<a name="line.732"></a>
-<span class="sourceLineNo">733</span>    return metricsMaster;<a name="line.733"></a>
-<span class="sourceLineNo">734</span>  }<a name="line.734"></a>
-<span class="sourceLineNo">735</span><a name="line.735"></a>
-<span class="sourceLineNo">736</span>  /**<a name="line.736"></a>
-<span class="sourceLineNo">737</span>   * &lt;p&gt;<a name="line.737"></a>
-<span class="sourceLineNo">738</span>   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it<a name="line.738"></a>
-<span class="sourceLineNo">739</span>   * should have already been initialized along with {@link ServerManager}.<a name="line.739"></a>
-<span class="sourceLineNo">740</span>   * &lt;/p&gt;<a name="line.740"></a>
-<span class="sourceLineNo">741</span>   * &lt;p&gt;<a name="line.741"></a>
-<span class="sourceLineNo">742</span>   * Will be overridden in tests.<a name="line.742"></a>
-<span class="sourceLineNo">743</span>   * &lt;/p&gt;<a name="line.743"></a>
-<span class="sourceLineNo">744</span>   */<a name="line.744"></a>
-<span class="sourceLineNo">745</span>  @VisibleForTesting<a name="line.745"></a>
-<span class="sourceLineNo">746</span>  protected void initializeZKBasedSystemTrackers()<a name="line.746"></a>
-<span class="sourceLineNo">747</span>      throws IOException, InterruptedException, KeeperException, ReplicationException {<a name="line.747"></a>
-<span class="sourceLineNo">748</span>    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);<a name="line.748"></a>
-<span class="sourceLineNo">749</span>    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);<a name="line.749"></a>
-<span class="sourceLineNo">750</span>    this.normalizer.setMasterServices(this);<a name="line.750"></a>
-<span class="sourceLineNo">751</span>    this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);<a name="line.751"></a>
-<span class="sourceLineNo">752</span>    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);<a name="line.752"></a>
-<span class="sourceLineNo">753</span>    this.loadBalancerTracker.start();<a name="line.753"></a>
-<span class="sourceLineNo">754</span><a name="line.754"></a>
-<span class="sourceLineNo">755</span>    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    this.regionNormalizerTracker.start();<a name="line.756"></a>
-<span class="sourceLineNo">757</span><a name="line.757"></a>
-<span class="sourceLineNo">758</span>    this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);<a name="line.758"></a>
-<span class="sourceLineNo">759</span>    this.splitOrMergeTracker.start();<a name="line.759"></a>
-<span class="sourceLineNo">760</span><a name="line.760"></a>
-<span class="sourceLineNo">761</span>    this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);<a name="line.761"></a>
-<span class="sourceLineNo">762</span>    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);<a name="line.762"></a>
-<span class="sourceLineNo">763</span><a name="line.763"></a>
-<span class="sourceLineNo">764</span>    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);<a name="line.764"></a>
-<span class="sourceLineNo">765</span>    this.drainingServerTracker.start();<a name="line.765"></a>
-<span class="sourceLineNo">766</span><a name="line.766"></a>
-<span class="sourceLineNo">767</span>    this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);<a name="line.767"></a>
-<span class="sourceLineNo">768</span>    this.maintenanceModeTracker.start();<a name="line.768"></a>
-<span class="sourceLineNo">769</span><a name="line.769"></a>
-<span class="sourceLineNo">770</span>    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);<a name="line.770"></a>
-<span class="sourceLineNo">771</span>    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,<a name="line.771"></a>
-<span class="sourceLineNo">772</span>      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);<a name="line.772"></a>
-<span class="sourceLineNo">773</span>    if (clientQuorumServers != null &amp;&amp; !clientZkObserverMode) {<a name="line.773"></a>
-<span class="sourceLineNo">774</span>      // we need to take care of the ZK information synchronization<a name="line.774"></a>
-<span class="sourceLineNo">775</span>      // if given client ZK are not observer nodes<a name="line.775"></a>
-<span class="sourceLineNo">776</span>      ZKWatcher clientZkWatcher = new ZKWatcher(conf,<a name="line.776"></a>
-<span class="sourceLineNo">777</span>          getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,<a name="line.777"></a>
-<span class="sourceLineNo">778</span>          false, true);<a name="line.778"></a>
-<span class="sourceLineNo">779</span>      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);<a name="line.779"></a>
-<span class="sourceLineNo">780</span>      this.metaLocationSyncer.start();<a name="line.780"></a>
-<span class="sourceLineNo">781</span>      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);<a name="line.781"></a>
-<span class="sourceLineNo">782</span>      this.masterAddressSyncer.start();<a name="line.782"></a>
-<span class="sourceLineNo">783</span>      // set cluster id is a one-go effort<a name="line.783"></a>
-<span class="sourceLineNo">784</span>      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());<a name="line.784"></a>
-<span class="sourceLineNo">785</span>    }<a name="line.785"></a>
-<span class="sourceLineNo">786</span><a name="line.786"></a>
-<span class="sourceLineNo">787</span>    // Set the cluster as up.  If new RSs, they'll be waiting on this before<a name="line.787"></a>
-<span class="sourceLineNo">788</span>    // going ahead with their startup.<a name="line.788"></a>
-<span class="sourceLineNo">789</span>    boolean wasUp = this.clusterStatusTracker.isClusterUp();<a name="line.789"></a>
-<span class="sourceLineNo">790</span>    if (!wasUp) this.clusterStatusTracker.setClusterUp();<a name="line.790"></a>
-<span class="sourceLineNo">791</span><a name="line.791"></a>
-<span class="sourceLineNo">792</span>    LOG.info("Active/primary master=" + this.serverName +<a name="line.792"></a>
-<span class="sourceLineNo">793</span>        ", sessionid=0x" +<a name="line.793"></a>
-<span class="sourceLineNo">794</span>        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +<a name="line.794"></a>
-<span class="sourceLineNo">795</span>        ", setting cluster-up flag (Was=" + wasUp + ")");<a name="line.795"></a>
-<span class="sourceLineNo">796</span><a name="line.796"></a>
-<span class="sourceLineNo">797</span>    // create/initialize the snapshot manager and other procedure managers<a name="line.797"></a>
-<span class="sourceLineNo">798</span>    this.snapshotManager = new SnapshotManager();<a name="line.798"></a>
-<span class="sourceLineNo">799</span>    this.mpmHost = new MasterProcedureManagerHost();<a name="line.799"></a>
-<span class="sourceLineNo">800</span>    this.mpmHost.register(this.snapshotManager);<a name="line.800"></a>
-<span class="sourceLineNo">801</span>    this.mpmHost.register(new MasterFlushTableProcedureManager());<a name="line.801"></a>
-<span class="sourceLineNo">802</span>    this.mpmHost.loadProcedures(conf);<a name="line.802"></a>
-<span class="sourceLineNo">803</span>    this.mpmHost.initialize(this, this.metricsMaster);<a name="line.803"></a>
-<span class="sourceLineNo">804</span>  }<a name="line.804"></a>
-<span class="sourceLineNo">805</span><a name="line.805"></a>
-<span class="sourceLineNo">806</span>  private static final ImmutableSet&lt;Class&lt;?&gt;&gt; UNSUPPORTED_PROCEDURES =<a name="line.806"></a>
-<span class="sourceLineNo">807</span>    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,<a name="line.807"></a>
-<span class="sourceLineNo">808</span>      MoveRegionProcedure.class);<a name="line.808"></a>
-<span class="sourceLineNo">809</span><a name="line.809"></a>
-<span class="sourceLineNo">810</span>  /**<a name="line.810"></a>
-<span class="sourceLineNo">811</span>   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is<a name="line.811"></a>
-<span class="sourceLineNo">812</span>   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to<a name="line.812"></a>
-<span class="sourceLineNo">813</span>   * make sure that there are none these procedures when upgrading. If there are, the master will<a name="line.813"></a>
-<span class="sourceLineNo">814</span>   * quit, you need to go back to the old version to finish these procedures first before upgrading.<a name="line.814"></a>
-<span class="sourceLineNo">815</span>   */<a name="line.815"></a>
-<span class="sourceLineNo">816</span>  private void checkUnsupportedProcedure(<a name="line.816"></a>
-<span class="sourceLineNo">817</span>      Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType)<a name="line.817"></a>
-<span class="sourceLineNo">818</span>      throws HBaseIOException {<a name="line.818"></a>
-<span class="sourceLineNo">819</span>    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to<a name="line.819"></a>
-<span class="sourceLineNo">820</span>    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as<a name="line.820"></a>
-<span class="sourceLineNo">821</span>    // there will be conflict in the code for AM. We should finish all these procedures before<a name="line.821"></a>
-<span class="sourceLineNo">822</span>    // upgrading.<a name="line.822"></a>
-<span class="sourceLineNo">823</span>    for (Class&lt;?&gt; clazz : UNSUPPORTED_PROCEDURES) {<a name="line.823"></a>
-<span class="sourceLineNo">824</span>      List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt; procs = procsByType.get(clazz);<a name="line.824"></a>
-<span class="sourceLineNo">825</span>      if (procs != null) {<a name="line.825"></a>
-<span class="sourceLineNo">826</span>        LOG.error(<a name="line.826"></a>
-<span class="sourceLineNo">827</span>          "Unsupported procedure type {} found, please rollback your master to the old" +<a name="line.827"></a>
-<span class="sourceLineNo">828</span>            " version to finish them, and then try to upgrade again. The full procedure list: {}",<a name="line.828"></a>
-<span class="sourceLineNo">829</span>          clazz, procs);<a name="line.829"></a>
-<span class="sourceLineNo">830</span>        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");<a name="line.830"></a>
-<span class="sourceLineNo">831</span>      }<a name="line.831"></a>
-<span class="sourceLineNo">832</span>    }<a name="line.832"></a>
-<span class="sourceLineNo">833</span>    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to<a name="line.833"></a>
-<span class="sourceLineNo">834</span>    // make sure that no one will try to schedule it but SCP does have a state which will schedule<a name="line.834"></a>
-<span class="sourceLineNo">835</span>    // it.<a name="line.835"></a>
-<span class="sourceLineNo">836</span>    if (procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.836"></a>
-<span class="sourceLineNo">837</span>      .map(p -&gt; (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)) {<a name="line.837"></a>
-<span class="sourceLineNo">838</span>      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure," +<a name="line.838"></a>
-<span class="sourceLineNo">839</span>        " which is not supported any more. Please rollback your master to the old version to" +<a name="line.839"></a>
-<span class="sourceLineNo">840</span>        " finish them, and then try to upgrade again.");<a name="line.840"></a>
-<span class="sourceLineNo">841</span>      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");<a name="line.841"></a>
-<span class="sourceLineNo">842</span>    }<a name="line.842"></a>
-<span class="sourceLineNo">843</span>  }<a name="line.843"></a>
-<span class="sourceLineNo">844</span><a name="line.844"></a>
-<span class="sourceLineNo">845</span>  /**<a name="line.845"></a>
-<span class="sourceLineNo">846</span>   * Finish initialization of HMaster after becoming the primary master.<a name="line.846"></a>
-<span class="sourceLineNo">847</span>   * &lt;p/&gt;<a name="line.847"></a>
-<span class="sourceLineNo">848</span>   * The startup order is a bit complicated but very important, do not change it unless you know<a name="line.848"></a>
-<span class="sourceLineNo">849</span>   * what you are doing.<a name="line.849"></a>
-<span class="sourceLineNo">850</span>   * &lt;ol&gt;<a name="line.850"></a>
-<span class="sourceLineNo">851</span>   * &lt;li&gt;Initialize file system based components - file system manager, wal manager, table<a name="line.851"></a>
-<span class="sourceLineNo">852</span>   * descriptors, etc&lt;/li&gt;<a name="line.852"></a>
-<span class="sourceLineNo">853</span>   * &lt;li&gt;Publish cluster id&lt;/li&gt;<a name="line.853"></a>
-<span class="sourceLineNo">854</span>   * &lt;li&gt;Here comes the most complicated part - initialize server manager, assignment manager and<a name="line.854"></a>
-<span class="sourceLineNo">855</span>   * region server tracker<a name="line.855"></a>
-<span class="sourceLineNo">856</span>   * &lt;ol type='i'&gt;<a name="line.856"></a>
-<span class="sourceLineNo">857</span>   * &lt;li&gt;Create server manager&lt;/li&gt;<a name="line.857"></a>
-<span class="sourceLineNo">858</span>   * &lt;li&gt;Create procedure executor, load the procedures, but do not start workers. We will start it<a name="line.858"></a>
-<span class="sourceLineNo">859</span>   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same<a name="line.859"></a>
-<span class="sourceLineNo">860</span>   * server&lt;/li&gt;<a name="line.860"></a>
-<span class="sourceLineNo">861</span>   * &lt;li&gt;Create assignment manager and start it, load the meta region state, but do not load data<a name="line.861"></a>
-<span class="sourceLineNo">862</span>   * from meta region&lt;/li&gt;<a name="line.862"></a>
-<span class="sourceLineNo">863</span>   * &lt;li&gt;Start region server tracker, construct the online servers set and find out dead servers and<a name="line.863"></a>
-<span class="sourceLineNo">864</span>   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also<a name="line.864"></a>
-<span class="sourceLineNo">865</span>   * scan the wal directory to find out possible live region servers, and the differences between<a name="line.865"></a>
-<span class="sourceLineNo">866</span>   * these two sets are the dead servers&lt;/li&gt;<a name="line.866"></a>
-<span class="sourceLineNo">867</span>   * &lt;/ol&gt;<a name="line.867"></a>
-<span class="sourceLineNo">868</span>   * &lt;/li&gt;<a name="line.868"></a>
-<span class="sourceLineNo">869</span>   * &lt;li&gt;If this is a new deploy, schedule a InitMetaProcedure to initialize meta&lt;/li&gt;<a name="line.869"></a>
-<span class="sourceLineNo">870</span>   * &lt;li&gt;Start necessary service threads - balancer, catalog janior, executor services, and also the<a name="line.870"></a>
-<span class="sourceLineNo">871</span>   * procedure executor, etc. Notice that the balancer must be created first as assignment manager<a name="line.871"></a>
-<span class="sourceLineNo">872</span>   * may use it when assigning regions.&lt;/li&gt;<a name="line.872"></a>
-<span class="sourceLineNo">873</span>   * &lt;li&gt;Wait for meta to be initialized if necesssary, start table state manager.&lt;/li&gt;<a name="line.873"></a>
-<span class="sourceLineNo">874</span>   * &lt;li&gt;Wait for enough region servers to check-in&lt;/li&gt;<a name="line.874"></a>
-<span class="sourceLineNo">875</span>   * &lt;li&gt;Let assignment manager load data from meta and construct region states&lt;/li&gt;<a name="line.875"></a>
-<span class="sourceLineNo">876</span>   * &lt;li&gt;Start all other things such as chore services, etc&lt;/li&gt;<a name="line.876"></a>
-<span class="sourceLineNo">877</span>   * &lt;/ol&gt;<a name="line.877"></a>
-<span class="sourceLineNo">878</span>   * &lt;p/&gt;<a name="line.878"></a>
-<span class="sourceLineNo">879</span>   * Notice that now we will not schedule a special procedure to make meta online(unless the first<a name="line.879"></a>
-<span class="sourceLineNo">880</span>   * time where meta has not been created yet), we will rely on SCP to bring meta online.<a name="line.880"></a>
-<span class="sourceLineNo">881</span>   */<a name="line.881"></a>
-<span class="sourceLineNo">882</span>  private void finishActiveMasterInitialization(MonitoredTask status) throws IOException,<a name="line.882"></a>
-<span class="sourceLineNo">883</span>          InterruptedException, KeeperException, ReplicationException {<a name="line.883"></a>
-<span class="sourceLineNo">884</span>    Thread zombieDetector = new Thread(new InitializationMonitor(this),<a name="line.884"></a>
-<span class="sourceLineNo">885</span>        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());<a name="line.885"></a>
-<span class="sourceLineNo">886</span>    zombieDetector.setDaemon(true);<a name="line.886"></a>
-<span class="sourceLineNo">887</span>    zombieDetector.start();<a name="line.887"></a>
-<span class="sourceLineNo">888</span><a name="line.888"></a>
-<span class="sourceLineNo">889</span>    /*<a name="line.889"></a>
-<span class="sourceLineNo">890</span>     * We are active master now... go initialize components we need to run.<a name="line.890"></a>
-<span class="sourceLineNo">891</span>     */<a name="line.891"></a>
-<span class="sourceLineNo">892</span>    status.setStatus("Initializing Master file system");<a name="line.892"></a>
-<span class="sourceLineNo">893</span><a name="line.893"></a>
-<span class="sourceLineNo">894</span>    this.masterActiveTime = System.currentTimeMillis();<a name="line.894"></a>
-<span class="sourceLineNo">895</span>    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.<a name="line.895"></a>
-<span class="sourceLineNo">896</span>    // Initialize the chunkCreator<a name="line.896"></a>
-<span class="sourceLineNo">897</span>    initializeMemStoreChunkCreator();<a name="line.897"></a>
-<span class="sourceLineNo">898</span>    this.fileSystemManager = new MasterFileSystem(conf);<a name="line.898"></a>
-<span class="sourceLineNo">899</span>    this.walManager = new MasterWalManager(this);<a name="line.899"></a>
-<span class="sourceLineNo">900</span><a name="line.900"></a>
-<span class="sourceLineNo">901</span>    // enable table descriptors cache<a name="line.901"></a>
-<span class="sourceLineNo">902</span>    this.tableDescriptors.setCacheOn();<a name="line.902"></a>
-<span class="sourceLineNo">903</span><a name="line.903"></a>
-<span class="sourceLineNo">904</span>    // warm-up HTDs cache on master initialization<a name="line.904"></a>
-<span class="sourceLineNo">905</span>    if (preLoadTableDescriptors) {<a name="line.905"></a>
-<span class="sourceLineNo">906</span>      status.setStatus("Pre-loading table descriptors");<a name="line.906"></a>
-<span class="sourceLineNo">907</span>      this.tableDescriptors.getAll();<a name="line.907"></a>
-<span class="sourceLineNo">908</span>    }<a name="line.908"></a>
-<span class="sourceLineNo">909</span><a name="line.909"></a>
-<span class="sourceLineNo">910</span>    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but<a name="line.910"></a>
-<span class="sourceLineNo">911</span>    // only after it has checked in with the Master. At least a few tests ask Master for clusterId<a name="line.911"></a>
-<span class="sourceLineNo">912</span>    // before it has called its run method and before RegionServer has done the reportForDuty.<a name="line.912"></a>
-<span class="sourceLineNo">913</span>    ClusterId clusterId = fileSystemManager.getClusterId();<a name="line.913"></a>
-<span class="sourceLineNo">914</span>    status.setStatus("Publishing Cluster ID " + clusterId + " in ZooKeeper");<a name="line.914"></a>
-<span class="sourceLineNo">915</span>    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());<a name="line.915"></a>
-<span class="sourceLineNo">916</span>    this.clusterId = clusterId.toString();<a name="line.916"></a>
-<span class="sourceLineNo">917</span><a name="line.917"></a>
-<span class="sourceLineNo">918</span>    status.setStatus("Initialze ServerManager and schedule SCP for crash servers");<a name="line.918"></a>
-<span class="sourceLineNo">919</span>    this.serverManager = createServerManager(this);<a name="line.919"></a>
-<span class="sourceLineNo">920</span>    createProcedureExecutor();<a name="line.920"></a>
-<span class="sourceLineNo">921</span>    @SuppressWarnings("rawtypes")<a name="line.921"></a>
-<span class="sourceLineNo">922</span>    Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType =<a name="line.922"></a>
-<span class="sourceLineNo">923</span>      procedureExecutor.getActiveProceduresNoCopy().stream()<a name="line.923"></a>
-<span class="sourceLineNo">924</span>        .collect(Collectors.groupingBy(p -&gt; p.getClass()));<a name="line.924"></a>
-<span class="sourceLineNo">925</span><a name="line.925"></a>
-<span class="sourceLineNo">926</span>    checkUnsupportedProcedure(procsByType);<a name="line.926"></a>
-<span class="sourceLineNo">927</span><a name="line.927"></a>
-<span class="sourceLineNo">928</span>    // Create Assignment Manager<a name="line.928"></a>
-<span class="sourceLineNo">929</span>    this.assignmentManager = new AssignmentManager(this);<a name="line.929"></a>
-<span class="sourceLineNo">930</span>    this.assignmentManager.start();<a name="line.930"></a>
-<span class="sourceLineNo">931</span>    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as<a name="line.931"></a>
-<span class="sourceLineNo">932</span>    // completed, it could still be in the procedure list. This is a bit strange but is another<a name="line.932"></a>
-<span class="sourceLineNo">933</span>    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.<a name="line.933"></a>
-<span class="sourceLineNo">934</span>    List&lt;TransitRegionStateProcedure&gt; ritList =<a name="line.934"></a>
-<span class="sourceLineNo">935</span>      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()<a name="line.935"></a>
-<span class="sourceLineNo">936</span>        .filter(p -&gt; !p.isFinished()).map(p -&gt; (TransitRegionStateProcedure) p)<a name="line.936"></a>
-<span class="sourceLineNo">937</span>        .collect(Collectors.toList());<a name="line.937"></a>
-<span class="sourceLineNo">938</span>    this.assignmentManager.setupRIT(ritList);<a name="line.938"></a>
-<span class="sourceLineNo">939</span><a name="line.939"></a>
-<span class="sourceLineNo">940</span>    this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);<a name="line.940"></a>
-<span class="sourceLineNo">941</span>    this.regionServerTracker.start(<a name="line.941"></a>
-<span class="sourceLineNo">942</span>      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.942"></a>
-<span class="sourceLineNo">943</span>        .map(p -&gt; (ServerCrashProcedure) p).map(p -&gt; p.getServerName()).collect(Collectors.toSet()),<a name="line.943"></a>
-<span class="sourceLineNo">944</span>      walManager.getLiveServersFromWALDir());<a name="line.944"></a>
-<span class="sourceLineNo">945</span>    // This manager will be started AFTER hbase:meta is confirmed on line.<a name="line.945"></a>
-<span class="sourceLineNo">946</span>    // hbase.mirror.table.state.to.zookeeper is so hbase1 clients can connect. They read table<a name="line.946"></a>
-<span class="sourceLineNo">947</span>    // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients.<a name="line.947"></a>
-<span class="sourceLineNo">948</span>    this.tableStateManager =<a name="line.948"></a>
-<span class="sourceLineNo">949</span>      this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true)<a name="line.949"></a>
-<span class="sourceLineNo">950</span>        ?<a name="line.950"></a>
-<span class="sourceLineNo">951</span>        new MirroringTableStateManager(this):<a name="line.951"></a>
-<span class="sourceLineNo">952</span>        new TableStateManager(this);<a name="line.952"></a>
-<span class="sourceLineNo">953</span><a name="line.953"></a>
-<span class="sourceLineNo">954</span>    status.setStatus("Initializing ZK system trackers");<a name="line.954"></a>
-<span class="sourceLineNo">955</span>    initializeZKBasedSystemTrackers();<a name="line.955"></a>
-<span class="sourceLineNo">956</span>    status.setStatus("Loading last flushed sequence id of regions");<a name="line.956"></a>
-<span class="sourceLineNo">957</span>    try {<a name="line.957"></a>
-<span class="sourceLineNo">958</span>      this.serverManager.loadLastFlushedSequenceIds();<a name="line.958"></a>
-<span class="sourceLineNo">959</span>    } catch (IOException e) {<a name="line.959"></a>
-<span class="sourceLineNo">960</span>      LOG.debug("Failed to load last flushed sequence id of regions"<a name="line.960"></a>
-<span class="sourceLineNo">961</span>          + " from file system", e);<a name="line.961"></a>
-<span class="sourceLineNo">962</span>    }<a name="line.962"></a>
-<span class="sourceLineNo">963</span>    // Set ourselves as active Master now our claim has succeeded up in zk.<a name="line.963"></a>
-<span class="sourceLineNo">964</span>    this.activeMaster = true;<a name="line.964"></a>
-<span class="sourceLineNo">965</span><a name="line.965"></a>
-<span class="sourceLineNo">966</span>    // This is for backwards compatibility<a name="line.966"></a>
-<span class="sourceLineNo">967</span>    // See HBASE-11393<a name="line.967"></a>
-<span class="sourceLineNo">968</span>    status.setStatus("Update TableCFs node in ZNode");<a name="line.968"></a>
-<span class="sourceLineNo">969</span>    ReplicationPeerConfigUpgrader tableCFsUpdater =<a name="line.969"></a>
-<span class="sourceLineNo">970</span>        new ReplicationPeerConfigUpgrader(zooKeeper, conf);<a name="line.970"></a>
-<span class="sourceLineNo">971</span>    tableCFsUpdater.copyTableCFs();<a name="line.971"></a>
-<span class="sourceLineNo">972</span><a name="line.972"></a>
-<span class="sourceLineNo">973</span>    // Add the Observer to delete quotas on table deletion before starting all CPs by<a name="line.973"></a>
-<span class="sourceLineNo">974</span>    // default with quota support, avoiding if user specifically asks to not load this Observer.<a name="line.974"></a>
-<span class="sourceLineNo">975</span>    if (QuotaUtil.isQuotaEnabled(conf)) {<a name="line.975"></a>
-<span class="sourceLineNo">976</span>      updateConfigurationForQuotasObserver(conf);<a name="line.976"></a>
-<span class="sourceLineNo">977</span>    }<a name="line.977"></a>
-<span class="sourceLineNo">978</span>    // initialize master side coprocessors before we start handling requests<a name="line.978"></a>
-<span class="sourceLineNo">979</span>    status.setStatus("Initializing master coprocessors");<a name="line.979"></a>
-<span class="sourceLineNo">980</span>    this.cpHost = new MasterCoprocessorHost(this, this.conf);<a name="line.980"></a>
+<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HBaseFsck;<a name="line.196"></a>
+<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.197"></a>
+<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.198"></a>
+<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.199"></a>
+<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.200"></a>
+<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.201"></a>
+<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.203"></a>
+<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.204"></a>
+<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.207"></a>
+<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.211"></a>
+<span class="sourceLineNo">212</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.212"></a>
+<span class="sourceLineNo">213</span>import org.apache.zookeeper.KeeperException;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.Server;<a name="line.214"></a>
+<span class="sourceLineNo">215</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.215"></a>
+<span class="sourceLineNo">216</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.216"></a>
+<span class="sourceLineNo">217</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>import org.slf4j.Logger;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>import org.slf4j.LoggerFactory;<a name="line.219"></a>
+<span class="sourceLineNo">220</span><a name="line.220"></a>
+<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.224"></a>
+<span class="sourceLineNo">225</span><a name="line.225"></a>
+<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.226"></a>
+<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.227"></a>
+<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.228"></a>
+<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>/**<a name="line.233"></a>
+<span class="sourceLineNo">234</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.234"></a>
+<span class="sourceLineNo">235</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.235"></a>
+<span class="sourceLineNo">236</span> * run the cluster.  All others park themselves in their constructor until<a name="line.236"></a>
+<span class="sourceLineNo">237</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.237"></a>
+<span class="sourceLineNo">238</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.238"></a>
+<span class="sourceLineNo">239</span> *<a name="line.239"></a>
+<span class="sourceLineNo">240</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.240"></a>
+<span class="sourceLineNo">241</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.241"></a>
+<span class="sourceLineNo">242</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.242"></a>
+<span class="sourceLineNo">243</span> *<a name="line.243"></a>
+<span class="sourceLineNo">244</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.244"></a>
+<span class="sourceLineNo">245</span> *<a name="line.245"></a>
+<span class="sourceLineNo">246<

<TRUNCATED>
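The checkUnsupportedProcedure routine in the HMaster source above boils down to two steps: group every procedure loaded from the procedure store by its concrete class (Collectors.groupingBy), then refuse to start if any of the classes retired by HBASE-20811 still has live instances. The following minimal, self-contained Java sketch illustrates that pattern; the Procedure, MoveRegionProcedure and TransitRegionStateProcedure classes here are simplified stand-ins, not the real org.apache.hadoop.hbase types.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class UpgradeCheckSketch {
  // Simplified stand-ins for the real procedure classes.
  static class Procedure {}
  static class MoveRegionProcedure extends Procedure {}
  static class TransitRegionStateProcedure extends Procedure {}

  // Hypothetical subset of procedure types the new master refuses to resume.
  private static final Set<Class<?>> UNSUPPORTED = Set.of(MoveRegionProcedure.class);

  public static void main(String[] args) {
    // Pretend these were loaded from the procedure store on startup; adding a
    // MoveRegionProcedure to this list would trip the check below.
    List<Procedure> active = Arrays.asList(
        new TransitRegionStateProcedure(), new TransitRegionStateProcedure());

    // Group the loaded procedures by their concrete class, mirroring the
    // Collectors.groupingBy(p -> p.getClass()) call in the startup path.
    Map<Class<?>, List<Procedure>> byType =
        active.stream().collect(Collectors.groupingBy(p -> (Class<?>) p.getClass()));

    // Fail fast if any unsupported type still has unfinished instances.
    for (Class<?> clazz : UNSUPPORTED) {
      List<Procedure> procs = byType.get(clazz);
      if (procs != null) {
        throw new IllegalStateException(
            "Unsupported procedure type " + clazz + " found: " + procs);
      }
    }
    System.out.println("No unsupported procedures found; safe to continue startup");
  }
}

In the real master the same failure path throws HBaseIOException and logs the full procedure list so the operator can roll back to the old version and let those procedures finish first.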

[07/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556

<TRUNCATED>
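
The retry behaviour added in the hunk above is driven by plain Configuration integers
(hbase.hbck.lockfile.* and hbase.hbck.createznode.*), with the defaults shown near the
top of the diff (80 s lock wait, 5 znode attempts, 200 ms / 5000 ms sleeps). A minimal
sketch of tuning those keys before constructing the tool; the key names and defaults are
the ones visible in the diff, while the chosen values and the class/method names below
are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HbckRetryTuning {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lock-file creation retries (defaults come from DEFAULT_MAX_LOCK_FILE_ATTEMPTS and friends).
        conf.setInt("hbase.hbck.lockfile.attempts", 3);
        conf.setInt("hbase.hbck.lockfile.attempt.sleep.interval", 100);   // milliseconds
        conf.setInt("hbase.hbck.lockfile.attempt.maxsleeptime", 2000);    // milliseconds
        // How long checkAndMarkRunningHbck waits for the lock (default 80 seconds in the diff).
        conf.setInt("hbase.hbck.lockfile.maxwaittime", 30);               // seconds
        // Ephemeral znode creation retries (defaults 5 attempts, 200 ms, 5000 ms above).
        conf.setInt("hbase.hbck.createznode.attempts", 3);
        conf.setInt("hbase.hbck.createznode.attempt.sleep.interval", 100); // milliseconds
        conf.setInt("hbase.hbck.createznode.attempt.maxsleeptime", 2000);  // milliseconds
        return conf;
      }
    }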

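The same refactoring turns the lock-file logic into static helpers:
createLockRetryCounterFactory(Configuration) builds the retry policy, and
checkAndMarkRunningHbck(Configuration, RetryCounter) now returns a Pair of the lock file
path (under the HBase temp dir, see getTmpDir in the diff) and the output stream, where
the stream is null if the lock could not be taken. A caller sketch assuming only the
signatures visible in the hunk; the surrounding class, setup, and cleanup are
illustrative, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.HBaseFsck;
    import org.apache.hadoop.hbase.util.Pair;
    import org.apache.hadoop.hbase.util.RetryCounterFactory;

    public class HbckLockSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Retry policy read from the hbase.hbck.lockfile.* keys.
        RetryCounterFactory lockRetries = HBaseFsck.createLockRetryCounterFactory(conf);
        // Try to create the hbck lock file; the stream is null if another process holds it.
        Pair<Path, FSDataOutputStream> lock =
            HBaseFsck.checkAndMarkRunningHbck(conf, lockRetries.create());
        if (lock.getSecond() == null) {
          System.out.println("Lock not acquired; held at " + lock.getFirst());
          return;
        }
        try {
          // ... exclusive hbck work would go here ...
        } finally {
          lock.getSecond().close();
          // The real tool also deletes lock.getFirst(); see unlockHbck() in the diff.
        }
      }
    }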
[10/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Gra

<TRUNCATED>
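
The FileLockCallable shown in the hunk above takes the exclusive hbck lock by creating a file under the HBase temp directory with overwrite disabled, and treats a RemoteException wrapping AlreadyBeingCreatedException as "another hbck instance already holds the lock". A minimal standalone sketch of that pattern, using only the plain Hadoop FileSystem API and an illustrative lock path that is not part of this commit, might look like:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class HbckLockSketch {
  /**
   * Attempt to take an exclusive lock by creating lockPath with overwrite=false.
   * Returns the open stream when the lock was acquired, or null when another
   * process is already creating/holding the lock file.
   */
  static FSDataOutputStream tryLock(Configuration conf, Path lockPath) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    try {
      // overwrite=false: create() fails if the file already exists
      return fs.create(lockPath, false);
    } catch (RemoteException e) {
      // Same check the hbck code above performs on the wrapped exception class
      if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
        return null;
      }
      throw e;
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative path only; hbck derives its real lock path from the HBase root dir.
    Path lockPath = new Path("/tmp/hbase-hbck.lock");
    FSDataOutputStream out = tryLock(conf, lockPath);
    System.out.println(out == null ? "lock already held" : "lock acquired");
    if (out != null) {
      out.close();
    }
  }
}
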

[37/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/c7b180e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/c7b180e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/c7b180e2

Branch: refs/heads/asf-site
Commit: c7b180e233a4b1f6b5b6b4f5b4d269268fac40d7
Parents: b8bc22f
Author: jenkins <bu...@apache.org>
Authored: Sat Aug 25 14:47:29 2018 +0000
Committer: jenkins <bu...@apache.org>
Committed: Sat Aug 25 14:47:29 2018 +0000

----------------------------------------------------------------------
 acid-semantics.html                             |     4 +-
 apache_hbase_reference_guide.pdf                |     4 +-
 book.html                                       |     2 +-
 bulk-loads.html                                 |     4 +-
 checkstyle-aggregate.html                       | 37038 ++++++++---------
 checkstyle.rss                                  |     4 +-
 coc.html                                        |     4 +-
 dependencies.html                               |     4 +-
 dependency-convergence.html                     |     4 +-
 dependency-info.html                            |     4 +-
 dependency-management.html                      |     4 +-
 devapidocs/allclasses-frame.html                |     1 +
 devapidocs/allclasses-noframe.html              |     1 +
 devapidocs/constant-values.html                 |     8 +-
 devapidocs/index-all.html                       |    20 +-
 .../hadoop/hbase/backup/package-tree.html       |     4 +-
 .../hadoop/hbase/client/package-tree.html       |    24 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |     2 +-
 .../hadoop/hbase/filter/package-tree.html       |     8 +-
 .../hadoop/hbase/io/hfile/package-tree.html     |     6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |     4 +-
 .../hadoop/hbase/mapreduce/package-tree.html    |     4 +-
 .../master/HMaster.InitializationMonitor.html   |    20 +-
 .../master/HMaster.MasterStoppedException.html  |     4 +-
 .../hbase/master/HMaster.RedirectServlet.html   |    12 +-
 .../master/HMaster.TableDescriptorGetter.html   |     4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |   480 +-
 .../hadoop/hbase/master/package-tree.html       |     4 +-
 .../hbase/master/procedure/package-tree.html    |     4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |    20 +-
 .../hadoop/hbase/procedure2/package-tree.html   |     4 +-
 .../hadoop/hbase/quotas/package-tree.html       |     8 +-
 .../hadoop/hbase/regionserver/package-tree.html |    20 +-
 .../regionserver/querymatcher/package-tree.html |     2 +-
 .../hbase/regionserver/wal/package-tree.html    |     2 +-
 .../hadoop/hbase/replication/package-tree.html  |     2 +-
 .../replication/regionserver/package-tree.html  |     2 +-
 .../hbase/security/access/package-tree.html     |     2 +-
 .../hadoop/hbase/security/package-tree.html     |     2 +-
 .../hadoop/hbase/thrift/package-tree.html       |     4 +-
 ...BaseFsck.CheckRegionConsistencyWorkItem.html |    10 +-
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html     |    80 +-
 .../hbase/util/HBaseFsck.ErrorReporter.html     |    30 +-
 .../hbase/util/HBaseFsck.FileLockCallable.html  |    63 +-
 .../hbase/util/HBaseFsck.HBaseFsckTool.html     |     6 +-
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   |    56 +-
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  |    14 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  |    18 +-
 .../hbase/util/HBaseFsck.OnlineEntry.html       |    10 +-
 .../util/HBaseFsck.PrintingErrorReporter.html   |    42 +-
 .../HBaseFsck.RegionBoundariesInformation.html  |    16 +-
 .../util/HBaseFsck.RegionRepairException.html   |     8 +-
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html |    22 +-
 ...aseFsck.TableInfo.IntegrityFixSuggester.html |    20 +-
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html  |    38 +-
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html   |    12 +-
 .../util/HBaseFsck.WorkItemHdfsRegionInfo.html  |    12 +-
 .../util/HBaseFsck.WorkItemOverlapMerge.html    |    10 +-
 .../hbase/util/HBaseFsck.WorkItemRegion.html    |    16 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.html |   753 +-
 .../hadoop/hbase/util/class-use/Pair.html       |    17 +-
 .../hbase/util/class-use/RetryCounter.html      |    19 +-
 .../util/class-use/RetryCounterFactory.html     |    17 +
 .../apache/hadoop/hbase/util/package-frame.html |     1 +
 .../hadoop/hbase/util/package-summary.html      |     4 +
 .../apache/hadoop/hbase/util/package-tree.html  |     6 +-
 .../apache/hadoop/hbase/wal/package-tree.html   |     2 +-
 .../org/apache/hadoop/hbase/Version.html        |     6 +-
 .../master/HMaster.InitializationMonitor.html   |  7151 ++--
 .../master/HMaster.MasterStoppedException.html  |  7151 ++--
 .../hbase/master/HMaster.RedirectServlet.html   |  7151 ++--
 .../master/HMaster.TableDescriptorGetter.html   |  7151 ++--
 .../org/apache/hadoop/hbase/master/HMaster.html |  7151 ++--
 .../RegionStateStore.RegionStateVisitor.html    |     2 +-
 .../master/assignment/RegionStateStore.html     |     2 +-
 ...BaseFsck.CheckRegionConsistencyWorkItem.html | 10099 ++---
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html     | 10099 ++---
 .../hbase/util/HBaseFsck.ErrorReporter.html     | 10099 ++---
 .../hbase/util/HBaseFsck.FileLockCallable.html  | 10099 ++---
 .../hbase/util/HBaseFsck.HBaseFsckTool.html     | 10099 ++---
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   | 10099 ++---
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  | 10099 ++---
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  | 10099 ++---
 .../hbase/util/HBaseFsck.OnlineEntry.html       | 10099 ++---
 .../util/HBaseFsck.PrintingErrorReporter.html   | 10099 ++---
 .../HBaseFsck.RegionBoundariesInformation.html  | 10099 ++---
 .../util/HBaseFsck.RegionRepairException.html   | 10099 ++---
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html | 10099 ++---
 ...aseFsck.TableInfo.IntegrityFixSuggester.html | 10099 ++---
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html  | 10099 ++---
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html   | 10099 ++---
 .../util/HBaseFsck.WorkItemHdfsRegionInfo.html  | 10099 ++---
 .../util/HBaseFsck.WorkItemOverlapMerge.html    | 10099 ++---
 .../hbase/util/HBaseFsck.WorkItemRegion.html    | 10099 ++---
 .../org/apache/hadoop/hbase/util/HBaseFsck.html | 10099 ++---
 downloads.html                                  |     4 +-
 export_control.html                             |     4 +-
 index.html                                      |     4 +-
 integration.html                                |     4 +-
 issue-tracking.html                             |     4 +-
 license.html                                    |     4 +-
 mail-lists.html                                 |     4 +-
 metrics.html                                    |     4 +-
 old_news.html                                   |     4 +-
 plugin-management.html                          |     4 +-
 plugins.html                                    |     4 +-
 poweredbyhbase.html                             |     4 +-
 project-info.html                               |     4 +-
 project-reports.html                            |     4 +-
 project-summary.html                            |     4 +-
 pseudo-distributed.html                         |     4 +-
 replication.html                                |     4 +-
 resources.html                                  |     4 +-
 source-repository.html                          |     4 +-
 sponsors.html                                   |     4 +-
 supportingprojects.html                         |     4 +-
 team-list.html                                  |     4 +-
 testdevapidocs/index-all.html                   |     2 +
 .../hadoop/hbase/io/hfile/package-tree.html     |     2 +-
 .../apache/hadoop/hbase/master/TestMaster.html  |    62 +-
 .../org/apache/hadoop/hbase/package-tree.html   |    10 +-
 .../hadoop/hbase/procedure2/package-tree.html   |     4 +-
 .../hadoop/hbase/regionserver/package-tree.html |     4 +-
 .../apache/hadoop/hbase/test/package-tree.html  |     4 +-
 .../apache/hadoop/hbase/wal/package-tree.html   |     2 +-
 .../apache/hadoop/hbase/master/TestMaster.html  |   448 +-
 126 files changed, 139290 insertions(+), 138137 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/acid-semantics.html
----------------------------------------------------------------------
diff --git a/acid-semantics.html b/acid-semantics.html
index b541a1b..4be0bf9 100644
--- a/acid-semantics.html
+++ b/acid-semantics.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Apache HBase (TM) ACID Properties
@@ -601,7 +601,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/apache_hbase_reference_guide.pdf
----------------------------------------------------------------------
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 2862164..ef7d6cb 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,8 +5,8 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20180824142955+00'00')
-/CreationDate (D:20180824144339+00'00')
+/ModDate (D:20180825142953+00'00')
+/CreationDate (D:20180825144405+00'00')
 >>
 endobj
 2 0 obj

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/book.html
----------------------------------------------------------------------
diff --git a/book.html b/book.html
index cbd3531..aa2eb41 100644
--- a/book.html
+++ b/book.html
@@ -41151,7 +41151,7 @@ org/apache/hadoop/hbase/security/access/AccessControlClient.revoke:(Lorg/apache/
 <div id="footer">
 <div id="footer-text">
 Version 3.0.0-SNAPSHOT<br>
-Last updated 2018-08-24 14:29:55 UTC
+Last updated 2018-08-25 14:29:53 UTC
 </div>
 </div>
 </body>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/bulk-loads.html
----------------------------------------------------------------------
diff --git a/bulk-loads.html b/bulk-loads.html
index 5a76fd9..b27730c 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Bulk Loads in Apache HBase (TM)
@@ -306,7 +306,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 


[09/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</sp

<TRUNCATED>
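
For readers skimming the HBaseFsck excerpt above: the exclusive-lock handshake implemented by FileLockCallable and checkAndMarkRunningHbck boils down to creating a well-known file with overwrite disabled and retrying transient failures, with the retry knobs read from the "hbase.hbck.lockfile.*" configuration keys shown in createLockRetryCounterFactory. The sketch below is an illustrative, self-contained restatement of that pattern, not the HBase code itself; the class name HbckLockSketch, the tryLock helper and the fallback values are assumptions made for this example.

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.net.InetAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileAlreadyExistsException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative sketch of an hbck-style exclusive lock file; names and defaults are not HBase's. */
    public class HbckLockSketch {

      /** Try to take the lock; returns the open stream on success, null if another process holds it. */
      public static FSDataOutputStream tryLock(Configuration conf, Path lockPath) throws IOException {
        // Retry knobs named after the keys read in createLockRetryCounterFactory above;
        // the fallback values here are placeholders, not HBase's shipped defaults.
        int maxAttempts = conf.getInt("hbase.hbck.lockfile.attempts", 5);
        long sleepMs = conf.getLong("hbase.hbck.lockfile.attempt.sleep.interval", 1000L);

        FileSystem fs = FileSystem.get(conf);
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            // overwrite=false is what makes the file act as a lock:
            // creation fails if the file already exists.
            FSDataOutputStream out = fs.create(lockPath, false);
            out.writeBytes(InetAddress.getLocalHost().toString());
            out.flush();
            return out; // caller keeps the stream open for the lifetime of the run
          } catch (FileAlreadyExistsException e) {
            return null; // another process owns the lock file
          } catch (IOException ioe) {
            if (attempt == maxAttempts) {
              throw ioe; // retries exhausted, surface the last failure
            }
            try {
              Thread.sleep(sleepMs); // fixed backoff; the real tool uses a RetryCounter
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
              throw (InterruptedIOException) new InterruptedIOException(
                  "Interrupted while waiting to retry " + lockPath).initCause(ie);
            }
          }
        }
        return null;
      }
    }

Releasing the lock then mirrors unlockHbck() above: close the stream and delete the lock file, again retrying deletes that fail transiently.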

[33/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index 2caa10b..3d115f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -347,11 +347,11 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
+<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">SplitLogManager.TerminationStatus</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">MasterRpcServices.BalanceSwitchMode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">MetricsMasterSourceFactoryImpl.FactoryStorage</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">SplitLogManager.ResubmitDirective</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/RegionState.State.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">RegionState.State</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">MetricsMasterSourceFactoryImpl.FactoryStorage</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.master.<a href="../../../../../org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html" title="enum in org.apache.hadoop.hbase.master"><span class="typeNameLink">SplitLogManager.TerminationStatus</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index a189946..5ab5cf5 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -216,10 +216,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/MetaProcedureInterface.MetaOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">MetaProcedureInterface.MetaOperationType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.TableOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">TableProcedureInterface.TableOperationType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">PeerProcedureInterface.PeerOperationType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.TableOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">TableProcedureInterface.TableOperationType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.ServerOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">ServerProcedureInterface.ServerOperationType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.master.procedure.<a href="../../../../../../org/apache/hadoop/hbase/master/procedure/MetaProcedureInterface.MetaOperationType.html" title="enum in org.apache.hadoop.hbase.master.procedure"><span class="typeNameLink">MetaProcedureInterface.MetaOperationType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 115d29f..e97ab98 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -437,19 +437,19 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompareOperator.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompareOperator</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HConstants.OperationStatusCode.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HConstants.OperationStatusCode</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HealthChecker.HealthCheckerExitStatus</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ClusterMetrics.Option.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ClusterMetrics.Option</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeepDeletedCells.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeepDeletedCells</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MemoryCompactionPolicy.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MemoryCompactionPolicy</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Coprocessor.State.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Coprocessor.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeyValue.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeyValue.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MetaTableAccessor.QueryType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Cell.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Cell.Type</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompatibilitySingletonFactory.SingletonStorage.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompatibilitySingletonFactory.SingletonStorage</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CellBuilderType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CellBuilderType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Size.Unit.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Size.Unit</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MetaTableAccessor.QueryType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeyValue.Type.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeyValue.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompatibilitySingletonFactory.SingletonStorage.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompatibilitySingletonFactory.SingletonStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HealthChecker.HealthCheckerExitStatus</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/Coprocessor.State.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">Coprocessor.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/MemoryCompactionPolicy.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">MemoryCompactionPolicy</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/CompareOperator.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">CompareOperator</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/KeepDeletedCells.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">KeepDeletedCells</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ClusterMetrics.Option.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ClusterMetrics.Option</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HConstants.OperationStatusCode.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HConstants.OperationStatusCode</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index f7d34e6..3de8cda 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -216,11 +216,11 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
+<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/LockedResourceType.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">LockedResourceType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/LockType.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">LockType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">RootProcedureState.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/Procedure.LockState.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">Procedure.LockState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">RootProcedureState.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/StateMachineProcedure.Flow.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">StateMachineProcedure.Flow</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/LockedResourceType.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">LockedResourceType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 826cc63..d99fa26 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -229,13 +229,13 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">SpaceViolationPolicy</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/OperationQuota.OperationType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">OperationQuota.OperationType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/RpcThrottlingException.Type.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">RpcThrottlingException.Type</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaScope.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">QuotaScope</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">QuotaType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/ThrottleType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">ThrottleType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/RpcThrottlingException.Type.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">RpcThrottlingException.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/ThrottlingException.Type.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">ThrottlingException.Type</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/QuotaScope.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">QuotaScope</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">SpaceViolationPolicy</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.quotas.<a href="../../../../../org/apache/hadoop/hbase/quotas/OperationQuota.OperationType.html" title="enum in org.apache.hadoop.hbase.quotas"><span class="typeNameLink">OperationQuota.OperationType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 658d1e5..5a36b1d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -708,20 +708,20 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/BloomType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">BloomType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.Type.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TimeRangeTracker.Type</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MemStoreCompactionStrategy.Action</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ChunkCreator.ChunkType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScannerContext.LimitScope</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScanType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScanType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.StepDirection.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">DefaultHeapMemoryTuner.StepDirection</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">HRegion.FlushResult.Result</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Region.Operation.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">Region.Operation</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MetricsRegionServerSourceFactoryImpl.FactoryStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TimeRangeTracker.Type.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TimeRangeTracker.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/FlushType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">FlushType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScannerContext.NextState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">CompactingMemStore.IndexType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.StepDirection.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">DefaultHeapMemoryTuner.StepDirection</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ChunkCreator.ChunkType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/Region.Operation.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">Region.Operation</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">SplitLogWorker.TaskExecutor.Status</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScannerContext.NextState</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MetricsRegionServerSourceFactoryImpl.FactoryStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">HRegion.FlushResult.Result</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/BloomType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">BloomType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">ScannerContext.LimitScope</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">MemStoreCompactionStrategy.Action</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 23060c2..b377318 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -131,8 +131,8 @@
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
 <li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/StripeCompactionScanQueryMatcher.DropDeletesInOutput.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">StripeCompactionScanQueryMatcher.DropDeletesInOutput</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.DeleteResult.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">DeleteTracker.DeleteResult</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">ScanQueryMatcher.MatchCode</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.querymatcher.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/querymatcher/DeleteTracker.DeleteResult.html" title="enum in org.apache.hadoop.hbase.regionserver.querymatcher"><span class="typeNameLink">DeleteTracker.DeleteResult</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index f0c55c8..7eedc5c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -247,9 +247,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
+<li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrResult.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">ProtobufLogReader.WALHdrResult</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.Type.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">RingBufferTruck.Type</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/CompressionContext.DictionaryIndex.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">CompressionContext.DictionaryIndex</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.wal.<a href="../../../../../../org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrResult.html" title="enum in org.apache.hadoop.hbase.regionserver.wal"><span class="typeNameLink">ProtobufLogReader.WALHdrResult</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
index f8e4b11..3c7146a 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
@@ -160,8 +160,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.replication.<a href="../../../../../org/apache/hadoop/hbase/replication/SyncReplicationState.html" title="enum in org.apache.hadoop.hbase.replication"><span class="typeNameLink">SyncReplicationState</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.replication.<a href="../../../../../org/apache/hadoop/hbase/replication/ReplicationPeer.PeerState.html" title="enum in org.apache.hadoop.hbase.replication"><span class="typeNameLink">ReplicationPeer.PeerState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.replication.<a href="../../../../../org/apache/hadoop/hbase/replication/SyncReplicationState.html" title="enum in org.apache.hadoop.hbase.replication"><span class="typeNameLink">SyncReplicationState</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
index 30c4e73..cd3870f 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
@@ -207,8 +207,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.replication.regionserver.<a href="../../../../../../org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.SourceHolder.html" title="enum in org.apache.hadoop.hbase.replication.regionserver"><span class="typeNameLink">MetricsReplicationSourceFactoryImpl.SourceHolder</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.replication.regionserver.<a href="../../../../../../org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html" title="enum in org.apache.hadoop.hbase.replication.regionserver"><span class="typeNameLink">ReplicationSourceShipper.WorkerState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.replication.regionserver.<a href="../../../../../../org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.SourceHolder.html" title="enum in org.apache.hadoop.hbase.replication.regionserver"><span class="typeNameLink">MetricsReplicationSourceFactoryImpl.SourceHolder</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index f594c43..8051208 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -142,9 +142,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
+<li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/AccessController.OpType.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">AccessController.OpType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/Permission.Action.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">Permission.Action</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/AccessControlFilter.Strategy.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">AccessControlFilter.Strategy</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.security.access.<a href="../../../../../../org/apache/hadoop/hbase/security/access/AccessController.OpType.html" title="enum in org.apache.hadoop.hbase.security.access"><span class="typeNameLink">AccessController.OpType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
index dfa02b5..7ba3a64 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
@@ -191,8 +191,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/SaslStatus.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">SaslStatus</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/AuthMethod.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">AuthMethod</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/SaslStatus.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">SaslStatus</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.security.<a href="../../../../../org/apache/hadoop/hbase/security/SaslUtil.QualityOfProtection.html" title="enum in org.apache.hadoop.hbase.security"><span class="typeNameLink">SaslUtil.QualityOfProtection</span></a></li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
index 2923faf..2853d09 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
@@ -199,9 +199,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.thrift.<a href="../../../../../org/apache/hadoop/hbase/thrift/ThriftServerRunner.ImplType.html" title="enum in org.apache.hadoop.hbase.thrift"><span class="typeNameLink">ThriftServerRunner.ImplType</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.thrift.<a href="../../../../../org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.thrift"><span class="typeNameLink">MetricsThriftServerSourceFactoryImpl.FactoryStorage</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.thrift.<a href="../../../../../org/apache/hadoop/hbase/thrift/ThriftMetrics.ThriftServerType.html" title="enum in org.apache.hadoop.hbase.thrift"><span class="typeNameLink">ThriftMetrics.ThriftServerType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.thrift.<a href="../../../../../org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.FactoryStorage.html" title="enum in org.apache.hadoop.hbase.thrift"><span class="typeNameLink">MetricsThriftServerSourceFactoryImpl.FactoryStorage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.thrift.<a href="../../../../../org/apache/hadoop/hbase/thrift/ThriftServerRunner.ImplType.html" title="enum in org.apache.hadoop.hbase.thrift"><span class="typeNameLink">ThriftServerRunner.ImplType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index 5fb98ee..5fd4df8 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2069">HBaseFsck.CheckRegionConsistencyWorkItem</a>
+<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2114">HBaseFsck.CheckRegionConsistencyWorkItem</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 </li>
@@ -211,7 +211,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>key</h4>
-<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2070">key</a></pre>
+<pre>private final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2115">key</a></pre>
 </li>
 </ul>
 <a name="hbi">
@@ -220,7 +220,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hbi</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2071">hbi</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2116">hbi</a></pre>
 </li>
 </ul>
 </li>
@@ -237,7 +237,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>CheckRegionConsistencyWorkItem</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2073">CheckRegionConsistencyWorkItem</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;key,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2118">CheckRegionConsistencyWorkItem</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;key,
                                <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi)</pre>
 </li>
 </ul>
@@ -255,7 +255,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2079">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html#line.2124">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index 55e73f4..5572799 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static enum <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4130">HBaseFsck.ErrorReporter.ERROR_CODE</a>
+<pre>public static enum <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4175">HBaseFsck.ErrorReporter.ERROR_CODE</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang">Enum</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;</pre>
 </li>
 </ul>
@@ -315,7 +315,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>UNKNOWN</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4131">UNKNOWN</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4176">UNKNOWN</a></pre>
 </li>
 </ul>
 <a name="NO_META_REGION">
@@ -324,7 +324,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_META_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4131">NO_META_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4176">NO_META_REGION</a></pre>
 </li>
 </ul>
 <a name="NULL_META_REGION">
@@ -333,7 +333,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NULL_META_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4131">NULL_META_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4176">NULL_META_REGION</a></pre>
 </li>
 </ul>
 <a name="NO_VERSION_FILE">
@@ -342,7 +342,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_VERSION_FILE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4131">NO_VERSION_FILE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4176">NO_VERSION_FILE</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_META_HDFS">
@@ -351,7 +351,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_META_HDFS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4131">NOT_IN_META_HDFS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4176">NOT_IN_META_HDFS</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_META">
@@ -360,7 +360,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_META</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4131">NOT_IN_META</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4176">NOT_IN_META</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_META_OR_DEPLOYED">
@@ -369,7 +369,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_META_OR_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4132">NOT_IN_META_OR_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4177">NOT_IN_META_OR_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_HDFS_OR_DEPLOYED">
@@ -378,7 +378,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_HDFS_OR_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4132">NOT_IN_HDFS_OR_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4177">NOT_IN_HDFS_OR_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="NOT_IN_HDFS">
@@ -387,7 +387,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_IN_HDFS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4132">NOT_IN_HDFS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4177">NOT_IN_HDFS</a></pre>
 </li>
 </ul>
 <a name="SERVER_DOES_NOT_MATCH_META">
@@ -396,7 +396,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>SERVER_DOES_NOT_MATCH_META</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4132">SERVER_DOES_NOT_MATCH_META</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4177">SERVER_DOES_NOT_MATCH_META</a></pre>
 </li>
 </ul>
 <a name="NOT_DEPLOYED">
@@ -405,7 +405,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NOT_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4133">NOT_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4178">NOT_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="MULTI_DEPLOYED">
@@ -414,7 +414,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>MULTI_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4133">MULTI_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4178">MULTI_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="SHOULD_NOT_BE_DEPLOYED">
@@ -423,7 +423,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>SHOULD_NOT_BE_DEPLOYED</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4133">SHOULD_NOT_BE_DEPLOYED</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4178">SHOULD_NOT_BE_DEPLOYED</a></pre>
 </li>
 </ul>
 <a name="MULTI_META_REGION">
@@ -432,7 +432,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>MULTI_META_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4133">MULTI_META_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4178">MULTI_META_REGION</a></pre>
 </li>
 </ul>
 <a name="RS_CONNECT_FAILURE">
@@ -441,7 +441,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>RS_CONNECT_FAILURE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4133">RS_CONNECT_FAILURE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4178">RS_CONNECT_FAILURE</a></pre>
 </li>
 </ul>
 <a name="FIRST_REGION_STARTKEY_NOT_EMPTY">
@@ -450,7 +450,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>FIRST_REGION_STARTKEY_NOT_EMPTY</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4134">FIRST_REGION_STARTKEY_NOT_EMPTY</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4179">FIRST_REGION_STARTKEY_NOT_EMPTY</a></pre>
 </li>
 </ul>
 <a name="LAST_REGION_ENDKEY_NOT_EMPTY">
@@ -459,7 +459,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LAST_REGION_ENDKEY_NOT_EMPTY</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4134">LAST_REGION_ENDKEY_NOT_EMPTY</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4179">LAST_REGION_ENDKEY_NOT_EMPTY</a></pre>
 </li>
 </ul>
 <a name="DUPE_STARTKEYS">
@@ -468,7 +468,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>DUPE_STARTKEYS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4134">DUPE_STARTKEYS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4179">DUPE_STARTKEYS</a></pre>
 </li>
 </ul>
 <a name="HOLE_IN_REGION_CHAIN">
@@ -477,7 +477,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>HOLE_IN_REGION_CHAIN</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4135">HOLE_IN_REGION_CHAIN</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4180">HOLE_IN_REGION_CHAIN</a></pre>
 </li>
 </ul>
 <a name="OVERLAP_IN_REGION_CHAIN">
@@ -486,7 +486,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>OVERLAP_IN_REGION_CHAIN</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4135">OVERLAP_IN_REGION_CHAIN</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4180">OVERLAP_IN_REGION_CHAIN</a></pre>
 </li>
 </ul>
 <a name="REGION_CYCLE">
@@ -495,7 +495,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>REGION_CYCLE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4135">REGION_CYCLE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4180">REGION_CYCLE</a></pre>
 </li>
 </ul>
 <a name="DEGENERATE_REGION">
@@ -504,7 +504,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>DEGENERATE_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4135">DEGENERATE_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4180">DEGENERATE_REGION</a></pre>
 </li>
 </ul>
 <a name="ORPHAN_HDFS_REGION">
@@ -513,7 +513,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>ORPHAN_HDFS_REGION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4136">ORPHAN_HDFS_REGION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4181">ORPHAN_HDFS_REGION</a></pre>
 </li>
 </ul>
 <a name="LINGERING_SPLIT_PARENT">
@@ -522,7 +522,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LINGERING_SPLIT_PARENT</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4136">LINGERING_SPLIT_PARENT</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4181">LINGERING_SPLIT_PARENT</a></pre>
 </li>
 </ul>
 <a name="NO_TABLEINFO_FILE">
@@ -531,7 +531,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_TABLEINFO_FILE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4136">NO_TABLEINFO_FILE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4181">NO_TABLEINFO_FILE</a></pre>
 </li>
 </ul>
 <a name="LINGERING_REFERENCE_HFILE">
@@ -540,7 +540,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LINGERING_REFERENCE_HFILE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4136">LINGERING_REFERENCE_HFILE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4181">LINGERING_REFERENCE_HFILE</a></pre>
 </li>
 </ul>
 <a name="LINGERING_HFILELINK">
@@ -549,7 +549,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>LINGERING_HFILELINK</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4137">LINGERING_HFILELINK</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4182">LINGERING_HFILELINK</a></pre>
 </li>
 </ul>
 <a name="WRONG_USAGE">
@@ -558,7 +558,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>WRONG_USAGE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4137">WRONG_USAGE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4182">WRONG_USAGE</a></pre>
 </li>
 </ul>
 <a name="EMPTY_META_CELL">
@@ -567,7 +567,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>EMPTY_META_CELL</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4137">EMPTY_META_CELL</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4182">EMPTY_META_CELL</a></pre>
 </li>
 </ul>
 <a name="EXPIRED_TABLE_LOCK">
@@ -576,7 +576,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>EXPIRED_TABLE_LOCK</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4137">EXPIRED_TABLE_LOCK</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4182">EXPIRED_TABLE_LOCK</a></pre>
 </li>
 </ul>
 <a name="BOUNDARIES_ERROR">
@@ -585,7 +585,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>BOUNDARIES_ERROR</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4137">BOUNDARIES_ERROR</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4182">BOUNDARIES_ERROR</a></pre>
 </li>
 </ul>
 <a name="ORPHAN_TABLE_STATE">
@@ -594,7 +594,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>ORPHAN_TABLE_STATE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4138">ORPHAN_TABLE_STATE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4183">ORPHAN_TABLE_STATE</a></pre>
 </li>
 </ul>
 <a name="NO_TABLE_STATE">
@@ -603,7 +603,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>NO_TABLE_STATE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4138">NO_TABLE_STATE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4183">NO_TABLE_STATE</a></pre>
 </li>
 </ul>
 <a name="UNDELETED_REPLICATION_QUEUE">
@@ -612,7 +612,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>UNDELETED_REPLICATION_QUEUE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4138">UNDELETED_REPLICATION_QUEUE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4183">UNDELETED_REPLICATION_QUEUE</a></pre>
 </li>
 </ul>
 <a name="DUPE_ENDKEYS">
@@ -621,7 +621,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>DUPE_ENDKEYS</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4138">DUPE_ENDKEYS</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4183">DUPE_ENDKEYS</a></pre>
 </li>
 </ul>
 <a name="UNSUPPORTED_OPTION">
@@ -630,7 +630,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>UNSUPPORTED_OPTION</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4139">UNSUPPORTED_OPTION</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4184">UNSUPPORTED_OPTION</a></pre>
 </li>
 </ul>
 <a name="INVALID_TABLE">
@@ -639,7 +639,7 @@ the order they are declared.</div>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>INVALID_TABLE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4139">INVALID_TABLE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.4184">INVALID_TABLE</a></pre>
 </li>
 </ul>
 </li>
@@ -656,7 +656,7 @@ the order they are declared.</div>
 <ul class="blockList">
 <li class="blockList">
 <h4>values</h4>
-<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3854">values</a>()</pre>
+<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3899">values</a>()</pre>
 <div class="block">Returns an array containing the constants of this enum type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -676,7 +676,7 @@ for (HBaseFsck.ErrorReporter.ERROR_CODE c : HBaseFsck.ErrorReporter.ERROR_CODE.v
 <ul class="blockListLast">
 <li class="blockList">
 <h4>valueOf</h4>
-<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3854">valueOf</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;name)</pre>
+<pre>public static&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html#line.3899">valueOf</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;name)</pre>
 <div class="block">Returns the enum constant of this type with the specified name.
 The string must match <i>exactly</i> an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>   

<TRUNCATED>
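
The FileLockCallable/checkAndMarkRunningHbck code in the hunk above combines two safeguards when taking the hbck1 exclusive lock: a bounded retry loop around the HDFS create() call (driven by a RetryCounter built from the hbase.hbck.lockfile.attempts and hbase.hbck.lockfile.attempt.sleep.interval settings) and a hard wall-clock timeout enforced by running the whole attempt inside a FutureTask (hbase.hbck.lockfile.maxwaittime). The following is a minimal sketch of that pattern using plain JDK types only; LockAcquireSketch, tryCreateLockFile() and the inlined constants are hypothetical stand-ins for illustration, not HBase API.

  import java.util.concurrent.*;

  // Minimal sketch: retry a flaky operation a bounded number of times, and cap the
  // whole acquisition with a hard timeout by running it inside a FutureTask.
  public class LockAcquireSketch {
    static final int MAX_ATTEMPTS = 5;       // cf. hbase.hbck.lockfile.attempts
    static final long SLEEP_MS = 200L;       // cf. hbase.hbck.lockfile.attempt.sleep.interval
    static final long TIMEOUT_SECONDS = 80L; // cf. hbase.hbck.lockfile.maxwaittime

    // Stand-in for the real file-create call; in hbck this is FSUtils.create() and
    // may fail transiently (e.g. AlreadyBeingCreatedException surfaced by the namenode).
    static String tryCreateLockFile() throws Exception {
      return "/hbase/.tmp/hbase-hbck.lock";
    }

    static String createWithRetries() throws Exception {
      Exception last = null;
      for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
        try {
          return tryCreateLockFile();
        } catch (Exception e) {
          last = e;                 // remember the failure and back off before retrying
          Thread.sleep(SLEEP_MS);   // the real code backs off via RetryCounter.sleepUntilNextRetry()
        }
      }
      throw last;                   // out of attempts: rethrow the last failure
    }

    public static void main(String[] args) throws Exception {
      ExecutorService pool = Executors.newFixedThreadPool(1);
      FutureTask<String> task = new FutureTask<String>(LockAcquireSketch::createWithRetries);
      pool.execute(task);
      try {
        // Bound the whole acquisition (all retries included) with a single timeout.
        String lockPath = task.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
        System.out.println("acquired " + lockPath);
      } catch (TimeoutException te) {
        task.cancel(true);          // give up: another hbck instance may hold the lock
        System.out.println("timed out acquiring lock");
      } finally {
        pool.shutdownNow();
      }
    }
  }

In the real code the whole-attempt timeout defaults to 80 seconds (DEFAULT_WAIT_FOR_LOCK_TIMEOUT), deliberately larger than HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds), so a create() held up by an existing lease can still resolve before hbck gives up.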

[15/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      Pair&lt;Path, FSD

<TRUNCATED>
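
The truncated hunk above rewrites HBaseFsck's exclusive-lock bootstrap: a FileLockCallable creates the hbck lock file under the cluster temp dir, checkAndMarkRunningHbck drives it through a single-thread executor with a bounded wait, and connect() registers a shutdown hook so the lock is released even on ctrl-c. The standalone sketch below illustrates only that general shape using plain JDK classes; the class name FsckLockDemo, the /tmp/demo-hbck.lock path, and the retry, sleep, and timeout values are all invented for illustration and are not HBase APIs or defaults.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.FutureTask;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    /**
     * Minimal sketch of an exclusive single-instance lock file, loosely modelled on the
     * FileLockCallable / checkAndMarkRunningHbck pattern in the diff above. All names and
     * values here (lock path, attempt count, sleep and timeout durations) are assumptions
     * made for the example; they are not HBase configuration keys or defaults.
     */
    public class FsckLockDemo {

      private static final Path LOCK_PATH = Paths.get("/tmp/demo-hbck.lock"); // assumed path
      private static final int MAX_ATTEMPTS = 5;       // assumed retry budget
      private static final long SLEEP_MILLIS = 1000L;  // assumed backoff between attempts
      private static final long WAIT_SECONDS = 30L;    // assumed overall acquisition timeout

      /** Tries to create the lock file, retrying on failure, like createFileWithRetries. */
      private static final Callable<Path> LOCK_CALLABLE = () -> {
        IOException last = null;
        for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
          try {
            // createFile fails atomically if the file already exists, so only one holder wins.
            Files.createFile(LOCK_PATH);
            Files.write(LOCK_PATH, "demo lock holder".getBytes(StandardCharsets.UTF_8));
            return LOCK_PATH;
          } catch (IOException ioe) {
            last = ioe;
            Thread.sleep(SLEEP_MILLIS); // crude fixed backoff instead of a RetryCounter
          }
        }
        throw last;
      };

      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(1);
        FutureTask<Path> futureTask = new FutureTask<>(LOCK_CALLABLE);
        executor.execute(futureTask);
        Path lock = null;
        try {
          // Bound the whole acquisition, mirroring futureTask.get(timeoutInSeconds, SECONDS).
          lock = futureTask.get(WAIT_SECONDS, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
          futureTask.cancel(true); // gave up waiting for the lock
        } catch (ExecutionException ee) {
          System.err.println("Could not create lock file: " + ee.getCause());
        } finally {
          executor.shutdownNow();
        }
        if (lock == null) {
          System.err.println("Another instance appears to hold the lock; exiting.");
          return;
        }
        final Path held = lock;
        // Clean up on ctrl-c, as connect() does with its shutdown hook.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
          try {
            Files.deleteIfExists(held);
          } catch (IOException ignored) {
          }
        }));
        System.out.println("Lock acquired at " + held + "; doing work...");
        Files.deleteIfExists(held); // normal-path cleanup; the hook covers abnormal exit
      }
    }

Driving the Callable through a FutureTask with a timed get() keeps a wedged filesystem call from hanging the whole tool: the caller can cancel the task and exit instead of blocking indefinitely, which matches the intent of the hbase.hbck.lockfile.maxwaittime setting seen in the diff.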

[19/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo

<TRUNCATED>

[05/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span

<TRUNCATED>

[03/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      Pair&lt;Path, FSDataOutputStream&gt; pair =<a name="line.556"></a>


<TRUNCATED>
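
The excerpt above shows hbck's exclusive lock handling: connect() takes the lock file, registers a JVM shutdown hook so a Ctrl-C still releases it, and unlockHbck() closes the stream and deletes the lock file with retries, guarded by the hbckLockCleanup AtomicBoolean. The sketch below is a minimal, hypothetical distillation of that lifecycle; the class and method names are invented for illustration, only FSUtils and the general pattern come from the code above.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.FSUtils;

    /** Illustrative only; not part of the HBase API. */
    public class ExclusiveLockLifecycle {
      private final Configuration conf;
      private final Path lockPath;
      private final FSDataOutputStream lockStream;
      // Guards cleanup so the shutdown hook and normal exit do not both delete
      // the file, mirroring the hbckLockCleanup flag in the excerpt above.
      private final AtomicBoolean cleanup = new AtomicBoolean(true);

      ExclusiveLockLifecycle(Configuration conf, Path lockPath, FSDataOutputStream lockStream) {
        this.conf = conf;
        this.lockPath = lockPath;
        this.lockStream = lockStream;
        // As in connect(): release the lock even if the user kills the tool.
        Runtime.getRuntime().addShutdownHook(new Thread(this::release));
      }

      void release() {
        if (cleanup.compareAndSet(true, false)) {
          try {
            lockStream.close();
            FSUtils.delete(FSUtils.getCurrentFileSystem(conf), lockPath, true);
          } catch (IOException ioe) {
            // A retry loop driven by a RetryCounter, as in unlockHbck(), would go here.
          }
        }
      }
    }
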

[13/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool to make modifications to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails, we return null.<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds to obtain lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
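To make the call flow concrete, here is a caller-side sketch (not from this patch; the class name HbckLockSketch and its error handling are illustrative) of taking and releasing the hbck1 lock via the two public helpers shown in this hunk. A null second element in the returned Pair means the lock could not be obtained, either because hbase-hbck.lock already exists or because the wait timed out; closing the stream only drops our handle, while hbck itself also deletes the lock path on exit, as unlockHbck() below shows:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.util.HBaseFsck;
  import org.apache.hadoop.hbase.util.Pair;

  public class HbckLockSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Build a retry counter from the same configuration keys HBaseFsck itself uses.
      Pair<Path, FSDataOutputStream> lock = HBaseFsck.checkAndMarkRunningHbck(conf,
          HBaseFsck.createLockRetryCounterFactory(conf).create());
      if (lock.getSecond() == null) {
        // Lock file already held by another process, or we timed out waiting for it.
        System.out.println("hbck lock not obtained; lock path: " + lock.getFirst());
        return;
      }
      try {
        // ... exclusive work guarded by the lock would go here ...
      } finally {
        lock.getSecond().close(); // drop our handle on the open lock file
      }
    }
  }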
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file " +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must first call connect() so that the<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state can be repaired.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+

<TRUNCATED>
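One tuning note on the hunk above: the wait for the hbck1 lock is bounded by hbase.hbck.lockfile.maxwaittime (default 80 seconds), which has to stay above HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds), as the constants in the HBaseFsck source in the next part explain. A hedged sketch of overriding it follows; the class name and the 120-second value are purely illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class HbckLockTimeoutSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Keep this above HDFS's 60-second lease soft limit so a dead writer's lease
      // can lapse before we give up waiting on hbase-hbck.lock.
      conf.setInt("hbase.hbck.lockfile.maxwaittime", 120); // seconds
      System.out.println(conf.getInt("hbase.hbck.lockfile.maxwaittime", 80));
    }
  }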

[18/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state, i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for an hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * This is the default file name hbase-1.x used for the hbck1 lock.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * hbck1 puts the lock in place when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy is created with a custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException, which implies a timeout on this operation of up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta is always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>   

<TRUNCATED>
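
A note for readers following the HBaseFsck diff above: the change replaces the inline retry-counter setup and the instance-scoped lock helper with static methods (createLockRetryCounterFactory, getTmpDir, and a checkAndMarkRunningHbck that returns a Pair of lock path and output stream), so the hbck1 lock file can be taken without constructing an HBaseFsck instance. The sketch below is an illustration of how a caller might use those statics, not code from this commit; the HbckLockProbe class name and the caller-side cleanup are assumptions, and since the message is truncated before the updated connect() appears, how HBaseFsck itself consumes the Pair is not reproduced here.

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.util.HBaseFsck;
  import org.apache.hadoop.hbase.util.Pair;
  import org.apache.hadoop.hbase.util.RetryCounter;

  public class HbckLockProbe {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      // Retry policy driven by the same hbase.hbck.lockfile.* settings shown in the diff.
      RetryCounter retryCounter = HBaseFsck.createLockRetryCounterFactory(conf).create();
      // Try to create the exclusive hbck lock file under the HBase root temp dir (see getTmpDir above).
      Pair<Path, FSDataOutputStream> lock = HBaseFsck.checkAndMarkRunningHbck(conf, retryCounter);
      if (lock.getSecond() == null) {
        // Lock file already exists: another hbck (or an hbase-2.x Master) holds it.
        System.err.println("hbck lock already held at " + lock.getFirst());
        return;
      }
      System.out.println("Acquired hbck lock file at " + lock.getFirst());
      // Release: close the stream; deleting the file is a separate step (compare unlockHbck above).
      lock.getSecond().close();
    }
  }

Closing the returned stream does not by itself remove the lock file: in the listing, unlockHbck() both closes hbckOutFd and deletes HBCK_LOCK_PATH, so a standalone caller would also need to delete the file it created (path from getFirst()) before another hbck run can take the lock.
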

[02/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/downloads.html
----------------------------------------------------------------------
diff --git a/downloads.html b/downloads.html
index 148d9f7..02a56e8 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Apache HBase Downloads</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -423,7 +423,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/export_control.html
----------------------------------------------------------------------
diff --git a/export_control.html b/export_control.html
index 82189e4..aa89e23 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Export Control
@@ -331,7 +331,7 @@ for more details.</p>
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/index.html
----------------------------------------------------------------------
diff --git a/index.html b/index.html
index 388103d..30df027 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
    <title>Apache HBase &#x2013; Apache HBase™ Home</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -411,7 +411,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/integration.html
----------------------------------------------------------------------
diff --git a/integration.html b/integration.html
index 2b41b4a..9972b06 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; CI Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -291,7 +291,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/issue-tracking.html
----------------------------------------------------------------------
diff --git a/issue-tracking.html b/issue-tracking.html
index 89240b8..0e409cc 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Issue Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -288,7 +288,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/license.html
----------------------------------------------------------------------
diff --git a/license.html b/license.html
index 201ba4d..c1973fc 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Licenses</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -491,7 +491,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/mail-lists.html
----------------------------------------------------------------------
diff --git a/mail-lists.html b/mail-lists.html
index cf546ca..13cb896 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Mailing Lists</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -341,7 +341,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/metrics.html
----------------------------------------------------------------------
diff --git a/metrics.html b/metrics.html
index d223bc8..33474db 100644
--- a/metrics.html
+++ b/metrics.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Apache HBase (TM) Metrics
@@ -459,7 +459,7 @@ export HBASE_REGIONSERVER_OPTS=&quot;$HBASE_JMX_OPTS -Dcom.sun.management.jmxrem
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/old_news.html
----------------------------------------------------------------------
diff --git a/old_news.html b/old_news.html
index 966509c..4ef11b1 100644
--- a/old_news.html
+++ b/old_news.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Old Apache HBase (TM) News
@@ -440,7 +440,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/plugin-management.html
----------------------------------------------------------------------
diff --git a/plugin-management.html b/plugin-management.html
index 0fbf1c0..d3e2783 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Plugin Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -440,7 +440,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/plugins.html
----------------------------------------------------------------------
diff --git a/plugins.html b/plugins.html
index 3f12747..7af6851 100644
--- a/plugins.html
+++ b/plugins.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Plugins</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -375,7 +375,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/poweredbyhbase.html
----------------------------------------------------------------------
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index 4b291d7..e9d44be 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Powered By Apache HBase™</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -769,7 +769,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/project-info.html
----------------------------------------------------------------------
diff --git a/project-info.html b/project-info.html
index 7270d9e..1c428ea 100644
--- a/project-info.html
+++ b/project-info.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Information</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -335,7 +335,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/project-reports.html
----------------------------------------------------------------------
diff --git a/project-reports.html b/project-reports.html
index 9e03457..319b823 100644
--- a/project-reports.html
+++ b/project-reports.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Generated Reports</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -305,7 +305,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/project-summary.html
----------------------------------------------------------------------
diff --git a/project-summary.html b/project-summary.html
index 2cd5d3a..3ecde6b 100644
--- a/project-summary.html
+++ b/project-summary.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Summary</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -331,7 +331,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/pseudo-distributed.html
----------------------------------------------------------------------
diff --git a/pseudo-distributed.html b/pseudo-distributed.html
index c76d68d..b337b06 100644
--- a/pseudo-distributed.html
+++ b/pseudo-distributed.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
 Running Apache HBase (TM) in pseudo-distributed mode
@@ -308,7 +308,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/replication.html
----------------------------------------------------------------------
diff --git a/replication.html b/replication.html
index 1bf2da9..0d9d548 100644
--- a/replication.html
+++ b/replication.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; 
       Apache HBase (TM) Replication
@@ -303,7 +303,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/resources.html
----------------------------------------------------------------------
diff --git a/resources.html b/resources.html
index e746b4d..f011509 100644
--- a/resources.html
+++ b/resources.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Other Apache HBase (TM) Resources</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -331,7 +331,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/source-repository.html
----------------------------------------------------------------------
diff --git a/source-repository.html b/source-repository.html
index aa37785..2d56bfb 100644
--- a/source-repository.html
+++ b/source-repository.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Source Code Management</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -299,7 +299,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/sponsors.html
----------------------------------------------------------------------
diff --git a/sponsors.html b/sponsors.html
index 9810957..d5ff65b 100644
--- a/sponsors.html
+++ b/sponsors.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Apache HBase™ Sponsors</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -333,7 +333,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/supportingprojects.html
----------------------------------------------------------------------
diff --git a/supportingprojects.html b/supportingprojects.html
index e1cdf2f..e32510f 100644
--- a/supportingprojects.html
+++ b/supportingprojects.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Supporting Projects</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -520,7 +520,7 @@ under the License. -->
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/team-list.html
----------------------------------------------------------------------
diff --git a/team-list.html b/team-list.html
index fc6111d..9880dc5 100644
--- a/team-list.html
+++ b/team-list.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180824" />
+    <meta name="Date-Revision-yyyymmdd" content="20180825" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013; Project Team</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.4-HBase.min.css" />
@@ -748,7 +748,7 @@
                         <a href="https://www.apache.org/">The Apache Software Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2018-08-24</li>
+                  <li id="publishDate" class="pull-right">Last Published: 2018-08-25</li>
             </p>
                 </div>
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/index-all.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 347b750..c9d7360 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -45624,6 +45624,8 @@
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html#testBlockIndexInternals-boolean-">testBlockIndexInternals(boolean)</a></span> - Method in class org.apache.hadoop.hbase.io.hfile.<a href="org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html" title="class in org.apache.hadoop.hbase.io.hfile">TestHFileBlockIndex</a></dt>
 <dd>&nbsp;</dd>
+<dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/master/TestMaster.html#testBlockingHbkc1WithLockFile--">testBlockingHbkc1WithLockFile()</a></span> - Method in class org.apache.hadoop.hbase.master.<a href="org/apache/hadoop/hbase/master/TestMaster.html" title="class in org.apache.hadoop.hbase.master">TestMaster</a></dt>
+<dd>&nbsp;</dd>
 <dt><a href="org/apache/hadoop/hbase/ipc/TestBlockingIPC.html" title="class in org.apache.hadoop.hbase.ipc"><span class="typeNameLink">TestBlockingIPC</span></a> - Class in <a href="org/apache/hadoop/hbase/ipc/package-summary.html">org.apache.hadoop.hbase.ipc</a></dt>
 <dd>&nbsp;</dd>
 <dt><span class="memberNameLink"><a href="org/apache/hadoop/hbase/ipc/TestBlockingIPC.html#TestBlockingIPC--">TestBlockingIPC()</a></span> - Constructor for class org.apache.hadoop.hbase.ipc.<a href="org/apache/hadoop/hbase/ipc/TestBlockingIPC.html" title="class in org.apache.hadoop.hbase.ipc">TestBlockingIPC</a></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index ec56445..4837d80 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -158,8 +158,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">TestCacheOnWrite.CacheOnWriteType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/TagUsage.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">TagUsage</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.io.hfile.<a href="../../../../../../org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html" title="enum in org.apache.hadoop.hbase.io.hfile"><span class="typeNameLink">TestCacheOnWrite.CacheOnWriteType</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/master/TestMaster.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/TestMaster.html b/testdevapidocs/org/apache/hadoop/hbase/master/TestMaster.html
index 4281411..d83d9d1 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/master/TestMaster.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/TestMaster.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 <li class="blockList">
 <hr>
 <br>
-<pre>public class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.65">TestMaster</a>
+<pre>public class <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.68">TestMaster</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 </li>
 </ul>
@@ -199,22 +199,26 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 </tr>
 <tr id="i2" class="altColor">
 <td class="colFirst"><code>void</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testFlushedSequenceIdPersistLoad--">testFlushedSequenceIdPersistLoad</a></span>()</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testBlockingHbkc1WithLockFile--">testBlockingHbkc1WithLockFile</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i3" class="rowColor">
 <td class="colFirst"><code>void</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMasterOpsWhileSplitting--">testMasterOpsWhileSplitting</a></span>()</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testFlushedSequenceIdPersistLoad--">testFlushedSequenceIdPersistLoad</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i4" class="altColor">
 <td class="colFirst"><code>void</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMoveRegionWhenNotInitialized--">testMoveRegionWhenNotInitialized</a></span>()</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMasterOpsWhileSplitting--">testMasterOpsWhileSplitting</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i5" class="rowColor">
 <td class="colFirst"><code>void</code></td>
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMoveThrowsPleaseHoldException--">testMoveThrowsPleaseHoldException</a></span>()</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMoveRegionWhenNotInitialized--">testMoveRegionWhenNotInitialized</a></span>()</code>&nbsp;</td>
 </tr>
 <tr id="i6" class="altColor">
 <td class="colFirst"><code>void</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMoveThrowsPleaseHoldException--">testMoveThrowsPleaseHoldException</a></span>()</code>&nbsp;</td>
+</tr>
+<tr id="i7" class="rowColor">
+<td class="colFirst"><code>void</code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/master/TestMaster.html#testMoveThrowsUnknownRegionException--">testMoveThrowsUnknownRegionException</a></span>()</code>&nbsp;</td>
 </tr>
 </table>
@@ -245,7 +249,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>CLASS_RULE</h4>
-<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/HBaseClassTestRule.html" title="class in org.apache.hadoop.hbase">HBaseClassTestRule</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.68">CLASS_RULE</a></pre>
+<pre>public static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/HBaseClassTestRule.html" title="class in org.apache.hadoop.hbase">HBaseClassTestRule</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.71">CLASS_RULE</a></pre>
 </li>
 </ul>
 <a name="TEST_UTIL">
@@ -254,7 +258,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>TEST_UTIL</h4>
-<pre>private static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/HBaseTestingUtility.html" title="class in org.apache.hadoop.hbase">HBaseTestingUtility</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.71">TEST_UTIL</a></pre>
+<pre>private static final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/HBaseTestingUtility.html" title="class in org.apache.hadoop.hbase">HBaseTestingUtility</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.74">TEST_UTIL</a></pre>
 </li>
 </ul>
 <a name="LOG">
@@ -263,7 +267,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>LOG</h4>
-<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.72">LOG</a></pre>
+<pre>private static final&nbsp;org.slf4j.Logger <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.75">LOG</a></pre>
 </li>
 </ul>
 <a name="TABLENAME">
@@ -272,7 +276,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>TABLENAME</h4>
-<pre>private static final&nbsp;org.apache.hadoop.hbase.TableName <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.73">TABLENAME</a></pre>
+<pre>private static final&nbsp;org.apache.hadoop.hbase.TableName <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.76">TABLENAME</a></pre>
 </li>
 </ul>
 <a name="FAMILYNAME">
@@ -281,7 +285,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>FAMILYNAME</h4>
-<pre>private static final&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.75">FAMILYNAME</a></pre>
+<pre>private static final&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.78">FAMILYNAME</a></pre>
 </li>
 </ul>
 <a name="admin">
@@ -290,7 +294,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>admin</h4>
-<pre>private static&nbsp;org.apache.hadoop.hbase.client.Admin <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.76">admin</a></pre>
+<pre>private static&nbsp;org.apache.hadoop.hbase.client.Admin <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.79">admin</a></pre>
 </li>
 </ul>
 <a name="name">
@@ -299,7 +303,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>name</h4>
-<pre>public&nbsp;org.junit.rules.TestName <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.79">name</a></pre>
+<pre>public&nbsp;org.junit.rules.TestName <a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.82">name</a></pre>
 </li>
 </ul>
 </li>
@@ -316,7 +320,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>TestMaster</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.65">TestMaster</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.68">TestMaster</a>()</pre>
 </li>
 </ul>
 </li>
@@ -333,7 +337,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>beforeAllTests</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.82">beforeAllTests</a>()
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.85">beforeAllTests</a>()
                            throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -347,7 +351,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>afterAllTests</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.91">afterAllTests</a>()
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.94">afterAllTests</a>()
                           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -361,7 +365,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>testMasterOpsWhileSplitting</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.97">testMasterOpsWhileSplitting</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.100">testMasterOpsWhileSplitting</a>()
                                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -375,7 +379,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>testMoveRegionWhenNotInitialized</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.139">testMoveRegionWhenNotInitialized</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.142">testMoveRegionWhenNotInitialized</a>()</pre>
 </li>
 </ul>
 <a name="testMoveThrowsUnknownRegionException--">
@@ -384,7 +388,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>testMoveThrowsUnknownRegionException</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.155">testMoveThrowsUnknownRegionException</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.158">testMoveThrowsUnknownRegionException</a>()
                                           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -398,7 +402,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>testMoveThrowsPleaseHoldException</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.177">testMoveThrowsPleaseHoldException</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.180">testMoveThrowsPleaseHoldException</a>()
                                        throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -409,10 +413,10 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <a name="testFlushedSequenceIdPersistLoad--">
 <!--   -->
 </a>
-<ul class="blockListLast">
+<ul class="blockList">
 <li class="blockList">
 <h4>testFlushedSequenceIdPersistLoad</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.200">testFlushedSequenceIdPersistLoad</a>()
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.203">testFlushedSequenceIdPersistLoad</a>()
                                       throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -420,6 +424,20 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 </dl>
 </li>
 </ul>
+<a name="testBlockingHbkc1WithLockFile--">
+<!--   -->
+</a>
+<ul class="blockListLast">
+<li class="blockList">
+<h4>testBlockingHbkc1WithLockFile</h4>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/master/TestMaster.html#line.231">testBlockingHbkc1WithLockFile</a>()
+                                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
+<dl>
+<dt><span class="throwsLabel">Throws:</span></dt>
+<dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></code></dd>
+</dl>
+</li>
+</ul>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
index 0603607..9cd3b5d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -576,15 +576,15 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.Stat.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">IntegrationTestRegionReplicaPerf.Stat</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/RESTApiClusterManager.Service.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">RESTApiClusterManager.Service</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ResourceChecker.Phase.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ResourceChecker.Phase</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HBaseClusterManager.CommandProvider.Operation.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HBaseClusterManager.CommandProvider.Operation</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/RESTApiClusterManager.RoleCommand.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">RESTApiClusterManager.RoleCommand</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ScanPerformanceEvaluation.ScanCounter.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ScanPerformanceEvaluation.ScanCounter</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.ACTION.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">IntegrationTestDDLMasterFailover.ACTION</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">PerformanceEvaluation.Counter</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/RESTApiClusterManager.Service.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">RESTApiClusterManager.Service</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ScanPerformanceEvaluation.ScanCounter.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ScanPerformanceEvaluation.ScanCounter</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.Stat.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">IntegrationTestRegionReplicaPerf.Stat</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/ClusterManager.ServiceType.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">ClusterManager.ServiceType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/HBaseClusterManager.CommandProvider.Operation.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">HBaseClusterManager.CommandProvider.Operation</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.<a href="../../../../org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html" title="enum in org.apache.hadoop.hbase"><span class="typeNameLink">PerformanceEvaluation.Counter</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 4aca12e..773eab8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -206,9 +206,9 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/TestYieldProcedures.TestStateMachineProcedure.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">TestYieldProcedures.TestStateMachineProcedure.State</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">TestProcedureRecovery.TestStateMachineProcedure.State</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.TestSMProcedureState.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">TestStateMachineProcedure.TestSMProcedureState</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">TestProcedureRecovery.TestStateMachineProcedure.State</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.procedure2.<a href="../../../../../org/apache/hadoop/hbase/procedure2/TestYieldProcedures.TestStateMachineProcedure.State.html" title="enum in org.apache.hadoop.hbase.procedure2"><span class="typeNameLink">TestYieldProcedures.TestStateMachineProcedure.State</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 60b513e..a0dec16 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -669,10 +669,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TestAtomicOperation.TestStep.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TestAtomicOperation.TestStep</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.CacheOnWriteType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TestCacheOnWriteInSchema.CacheOnWriteType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.ActionType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TestMultiLogThreshold.ActionType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TestAtomicOperation.TestStep.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TestAtomicOperation.TestStep</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.Manipulation.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">DataBlockEncodingTool.Manipulation</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.CacheOnWriteType.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TestCacheOnWriteInSchema.CacheOnWriteType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.regionserver.<a href="../../../../../org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.Metric.html" title="enum in org.apache.hadoop.hbase.regionserver"><span class="typeNameLink">TestRegionServerReadRequestMetrics.Metric</span></a></li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
index 05d71ef..5ad4f9a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
@@ -253,10 +253,10 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.test.<a href="../../../../../org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.Counters.html" title="enum in org.apache.hadoop.hbase.test"><span class="typeNameLink">IntegrationTestLoadAndVerify.Counters</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.test.<a href="../../../../../org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.Verify.Counts.html" title="enum in org.apache.hadoop.hbase.test"><span class="typeNameLink">IntegrationTestBigLinkedList.Verify.Counts</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.test.<a href="../../../../../org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.Counters.html" title="enum in org.apache.hadoop.hbase.test"><span class="typeNameLink">IntegrationTestWithCellVisibilityLoadAndVerify.Counters</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.test.<a href="../../../../../org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.Generator.Counts.html" title="enum in org.apache.hadoop.hbase.test"><span class="typeNameLink">IntegrationTestBigLinkedList.Generator.Counts</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.test.<a href="../../../../../org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.Counters.html" title="enum in org.apache.hadoop.hbase.test"><span class="typeNameLink">IntegrationTestLoadAndVerify.Counters</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.test.<a href="../../../../../org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.Verify.Counts.html" title="enum in org.apache.hadoop.hbase.test"><span class="typeNameLink">IntegrationTestBigLinkedList.Verify.Counts</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index 5282059..b21ae88 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -142,8 +142,8 @@
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
 <li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/IOTestProvider.AllowedOperations.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">IOTestProvider.AllowedOperations</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/FaultyFSLog.FailureType.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">FaultyFSLog.FailureType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">TestWALSplit.Corruptions</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/FaultyFSLog.FailureType.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">FaultyFSLog.FailureType</span></a></li>
 </ul>
 </li>
 </ul>


[31/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index e4fb91b..bdedcea 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -126,7 +126,7 @@
 </dl>
 <hr>
 <br>
-<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.1281">HBaseFsck.RegionRepairException</a>
+<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.1326">HBaseFsck.RegionRepairException</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Exception thrown when a integrity repair operation fails in an
  unresolvable way.</div>
@@ -221,7 +221,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.h
 <ul class="blockList">
 <li class="blockList">
 <h4>serialVersionUID</h4>
-<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1282">serialVersionUID</a></pre>
+<pre>private static final&nbsp;long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1327">serialVersionUID</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.RegionRepairException.serialVersionUID">Constant Field Values</a></dd>
@@ -234,7 +234,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.h
 <ul class="blockListLast">
 <li class="blockList">
 <h4>ioe</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1283">ioe</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1328">ioe</a></pre>
 </li>
 </ul>
 </li>
@@ -251,7 +251,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.h
 <ul class="blockListLast">
 <li class="blockList">
 <h4>RegionRepairException</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1284">RegionRepairException</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;s,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html#line.1329">RegionRepairException</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;s,
                              <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a>&nbsp;ioe)</pre>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index fe72f1a..c949650 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2977">HBaseFsck.TableInfo.HDFSIntegrityFixer</a>
+<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3022">HBaseFsck.TableInfo.HDFSIntegrityFixer</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo.IntegrityFixSuggester</a></pre>
 <div class="block">This handler fixes integrity errors from hdfs information.  There are
  basically three classes of integrity problems 1) holes, 2) overlaps, and
@@ -295,7 +295,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>conf</h4>
-<pre>org.apache.hadoop.conf.Configuration <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2978">conf</a></pre>
+<pre>org.apache.hadoop.conf.Configuration <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3023">conf</a></pre>
 </li>
 </ul>
 <a name="fixOverlaps">
@@ -304,7 +304,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockListLast">
 <li class="blockList">
 <h4>fixOverlaps</h4>
-<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2980">fixOverlaps</a></pre>
+<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3025">fixOverlaps</a></pre>
 </li>
 </ul>
 </li>
@@ -321,7 +321,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HDFSIntegrityFixer</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2982">HDFSIntegrityFixer</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3027">HDFSIntegrityFixer</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
                    <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors,
                    org.apache.hadoop.conf.Configuration&nbsp;conf,
                    boolean&nbsp;fixHoles,
@@ -342,7 +342,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionStartKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.2996">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;next)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3041">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;next)
                                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">This is a special case hole -- when the first region of a table is
  missing from META, HBase doesn't acknowledge the existence of the
@@ -367,7 +367,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionEndKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3016">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3061">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleRegionEndKeyNotEmpty-byte:A-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling case where a Table has a last region that does not
@@ -391,7 +391,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleHoleInRegionChain</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3038">handleHoleInRegionChain</a>(byte[]&nbsp;holeStartKey,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3083">handleHoleInRegionChain</a>(byte[]&nbsp;holeStartKey,
                                     byte[]&nbsp;holeStopKey)
                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">There is a hole in the hdfs regions that violates the table integrity
@@ -415,7 +415,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>handleOverlapGroup</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3068">handleOverlapGroup</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3113">handleOverlapGroup</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
                         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">This takes set of overlapping regions and merges them into a single
  region.  This covers cases like degenerate regions, shared start key,
@@ -444,7 +444,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>removeParentsAndFixSplits</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3093">removeParentsAndFixSplits</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3138">removeParentsAndFixSplits</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
                         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -458,7 +458,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockList">
 <li class="blockList">
 <h4>mergeOverlaps</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3205">mergeOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3250">mergeOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlap)
             throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="throwsLabel">Throws:</span></dt>
@@ -472,7 +472,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo
 <ul class="blockListLast">
 <li class="blockList">
 <h4>sidelineBigOverlaps</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3283">sidelineBigOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;bigOverlap)
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html#line.3328">sidelineBigOverlaps</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;bigOverlap)
                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Sideline some regions in a big overlap group so that it
  will have fewer regions, and it is easier to merge them later on.</div>
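
The fixer callbacks above (plugging a missing first/last region, filling a hole in the chain, merging an overlap group, sidelining big overlaps) all reduce to arithmetic on region start/end row keys. Below is a minimal, self-contained sketch of just the range computation behind merging one overlap group; the class and helper names are invented for illustration, and the real HBaseFsck fixer additionally moves region directories in HDFS and updates hbase:meta.

  import java.util.Arrays;
  import java.util.Collection;
  import java.util.List;

  /**
   * Illustrative sketch only: collapse a group of overlapping [start, end) row-key ranges into
   * the single range a merge would produce. KeyRange is a hypothetical stand-in for the
   * start/end keys carried by an HbckInfo entry.
   */
  public final class OverlapMergeSketch {

    static final class KeyRange {
      final byte[] start;  // zero-length array = first key of the table
      final byte[] end;    // zero-length array = last key of the table
      KeyRange(byte[] start, byte[] end) { this.start = start; this.end = end; }
    }

    /** Unsigned lexicographic comparison, the ordering HBase uses for row keys. */
    static int compareKeys(byte[] a, byte[] b) {
      int n = Math.min(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int d = (a[i] & 0xff) - (b[i] & 0xff);
        if (d != 0) {
          return d;
        }
      }
      return a.length - b.length;
    }

    /** The merged range spans the smallest start key and the largest end key in the group. */
    static KeyRange merge(Collection<KeyRange> overlapGroup) {
      byte[] lo = null;
      byte[] hi = null;
      for (KeyRange r : overlapGroup) {
        if (lo == null || r.start.length == 0 || (lo.length != 0 && compareKeys(r.start, lo) < 0)) {
          lo = r.start;
        }
        if (hi == null || r.end.length == 0 || (hi.length != 0 && compareKeys(r.end, hi) > 0)) {
          hi = r.end;
        }
      }
      return new KeyRange(lo, hi);
    }

    public static void main(String[] args) {
      List<KeyRange> overlap = Arrays.asList(
          new KeyRange("a".getBytes(), "f".getBytes()),
          new KeyRange("c".getBytes(), "k".getBytes()));
      KeyRange merged = merge(overlap);
      System.out.println(new String(merged.start) + " -> " + new String(merged.end));  // prints: a -> k
    }
  }

An empty byte[] sorts before every non-empty key when used as a start key and stands for the end of the table when used as an end key, which is why the sketch special-cases zero-length arrays.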

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index 5929a03..f4a6210 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2893">HBaseFsck.TableInfo.IntegrityFixSuggester</a>
+<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2938">HBaseFsck.TableInfo.IntegrityFixSuggester</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html" title="class in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandlerImpl</a></pre>
 </li>
 </ul>
@@ -267,7 +267,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockListLast">
 <li class="blockList">
 <h4>errors</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2894">errors</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2939">errors</a></pre>
 </li>
 </ul>
 </li>
@@ -284,7 +284,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockListLast">
 <li class="blockList">
 <h4>IntegrityFixSuggester</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2896">IntegrityFixSuggester</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2941">IntegrityFixSuggester</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;ti,
                       <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors)</pre>
 </li>
 </ul>
@@ -302,7 +302,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionStartKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2902">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2947">handleRegionStartKeyNotEmpty</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
                                   throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleRegionStartKeyNotEmpty-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling case where a Table has a first region that does not
@@ -327,7 +327,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleRegionEndKeyNotEmpty</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2910">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2955">handleRegionEndKeyNotEmpty</a>(byte[]&nbsp;curEndKey)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleRegionEndKeyNotEmpty-byte:A-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling case where a Table has a last region that does not
@@ -351,7 +351,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleDegenerateRegion</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2917">handleDegenerateRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2962">handleDegenerateRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi)
                             throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleDegenerateRegion-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
 <div class="block">Callback for handling a region that has the same start and end key.</div>
@@ -373,7 +373,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleDuplicateStartKeys</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2923">handleDuplicateStartKeys</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2968">handleDuplicateStartKeys</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
                                      <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r2)
                               throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleDuplicateStartKeys-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
@@ -398,7 +398,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleSplit</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2935">handleSplit</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2980">handleSplit</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r1,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;r2)
                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from interface:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html#handleSplit-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandler</a></code></span></div>
@@ -419,7 +419,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockList">
 <li class="blockList">
 <h4>handleOverlapInRegionChain</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2947">handleOverlapInRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi1,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2992">handleOverlapInRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi1,
                                        <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hi2)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleOverlapInRegionChain-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo-">TableIntegrityErrorHandlerImpl</a></code></span></div>
@@ -446,7 +446,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrity
 <ul class="blockListLast">
 <li class="blockList">
 <h4>handleHoleInRegionChain</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2954">handleHoleInRegionChain</a>(byte[]&nbsp;holeStart,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html#line.2999">handleHoleInRegionChain</a>(byte[]&nbsp;holeStart,
                                     byte[]&nbsp;holeStop)
                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block"><span class="descfrmTypeLabel">Description copied from class:&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.html#handleHoleInRegionChain-byte:A-byte:A-">TableIntegrityErrorHandlerImpl</a></code></span></div>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index 52a06c6..e71f31c 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2798">HBaseFsck.TableInfo</a>
+<pre>public class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2843">HBaseFsck.TableInfo</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 <div class="block">Maintain information about a particular table.</div>
 </li>
@@ -293,7 +293,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>tableName</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2799">tableName</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2844">tableName</a></pre>
 </li>
 </ul>
 <a name="deployedOn">
@@ -302,7 +302,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedOn</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/TreeSet.html?is-external=true" title="class or interface in java.util">TreeSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2800">deployedOn</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/TreeSet.html?is-external=true" title="class or interface in java.util">TreeSet</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2845">deployedOn</a></pre>
 </li>
 </ul>
 <a name="backwards">
@@ -311,7 +311,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>backwards</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2803">backwards</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2848">backwards</a></pre>
 </li>
 </ul>
 <a name="sidelinedRegions">
@@ -320,7 +320,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>sidelinedRegions</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;org.apache.hadoop.fs.Path,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2806">sidelinedRegions</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</a>&lt;org.apache.hadoop.fs.Path,<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2851">sidelinedRegions</a></pre>
 </li>
 </ul>
 <a name="sc">
@@ -329,7 +329,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>sc</h4>
-<pre>final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RegionSplitCalculator.html" title="class in org.apache.hadoop.hbase.util">RegionSplitCalculator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2809">sc</a></pre>
+<pre>final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/RegionSplitCalculator.html" title="class in org.apache.hadoop.hbase.util">RegionSplitCalculator</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2854">sc</a></pre>
 </li>
 </ul>
 <a name="htds">
@@ -338,7 +338,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>htds</h4>
-<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2812">htds</a></pre>
+<pre>final&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2857">htds</a></pre>
 </li>
 </ul>
 <a name="overlapGroups">
@@ -347,7 +347,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>overlapGroups</h4>
-<pre>final&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2815">overlapGroups</a></pre>
+<pre>final&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2860">overlapGroups</a></pre>
 </li>
 </ul>
 <a name="regionsFromMeta">
@@ -356,7 +356,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>regionsFromMeta</h4>
-<pre>private&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2819">regionsFromMeta</a></pre>
+<pre>private&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2864">regionsFromMeta</a></pre>
 </li>
 </ul>
 </li>
@@ -373,7 +373,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>TableInfo</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2821">TableInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;name)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2866">TableInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;name)</pre>
 </li>
 </ul>
 </li>
@@ -390,7 +390,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getHTD</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2829">getHTD</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/TableDescriptor.html" title="interface in org.apache.hadoop.hbase.client">TableDescriptor</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2874">getHTD</a>()</pre>
 <dl>
 <dt><span class="returnLabel">Returns:</span></dt>
 <dd>descriptor common to all regions.  null if are none or multiple!</dd>
@@ -403,7 +403,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>addRegionInfo</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2839">addRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hir)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2884">addRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hir)</pre>
 </li>
 </ul>
 <a name="addServer-org.apache.hadoop.hbase.ServerName-">
@@ -412,7 +412,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>addServer</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2864">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;server)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2909">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;server)</pre>
 </li>
 </ul>
 <a name="getName--">
@@ -421,7 +421,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getName</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2868">getName</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2913">getName</a>()</pre>
 </li>
 </ul>
 <a name="getNumRegions--">
@@ -430,7 +430,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getNumRegions</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2872">getNumRegions</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2917">getNumRegions</a>()</pre>
 </li>
 </ul>
 <a name="getRegionsFromMeta--">
@@ -439,7 +439,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionsFromMeta</h4>
-<pre>public&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2876">getRegionsFromMeta</a>()</pre>
+<pre>public&nbsp;org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.2921">getRegionsFromMeta</a>()</pre>
 </li>
 </ul>
 <a name="checkRegionChain-org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler-">
@@ -448,7 +448,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>checkRegionChain</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3331">checkRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler)
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3376">checkRegionChain</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler)
                          throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <div class="block">Check the region chain (from META) of this table.  We are looking for
  holes, overlaps, and cycles.</div>
@@ -466,7 +466,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>handleOverlapsParallel</h4>
-<pre>private&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3461">handleOverlapsParallel</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler,
+<pre>private&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3506">handleOverlapsParallel</a>(<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler,
                                        byte[]&nbsp;prevKey)
                                 throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
@@ -481,7 +481,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>dump</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3498">dump</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;byte[]&gt;&nbsp;splits,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html#line.3543">dump</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true" title="class or interface in java.util">SortedSet</a>&lt;byte[]&gt;&nbsp;splits,
           org.apache.hbase.thirdparty.com.google.common.collect.Multimap&lt;byte[],<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;regions)</pre>
 <div class="block">This dumps data in a visually reasonable way for visual debugging</div>
 <dl>
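
checkRegionChain walks the regions META reports for the table and hands each hole, overlap, degenerate region and so on to the TableIntegrityErrorHandler callbacks documented above. A rough, self-contained illustration of the hole/overlap scan, assuming regions sorted by start key (the types and messages are invented, String keys stand in for HBase's byte[] row keys, and an empty end key in the middle of the chain is not special-cased):

  import java.util.ArrayList;
  import java.util.Comparator;
  import java.util.List;

  /** Illustrative sketch only, not HBaseFsck's implementation. */
  final class RegionChainCheckSketch {

    static final class Region {
      final String start;
      final String end;
      Region(String start, String end) { this.start = start; this.end = end; }
    }

    static void check(List<Region> regions) {
      regions.sort(Comparator.comparing((Region r) -> r.start));
      String prevEnd = "";  // the chain must begin with a region whose start key is empty
      for (Region r : regions) {
        if (prevEnd.compareTo(r.start) < 0) {
          System.out.println("hole: nothing covers [" + prevEnd + ", " + r.start + ")");
        } else if (prevEnd.compareTo(r.start) > 0) {
          System.out.println("overlap: region starting at " + r.start + " begins before " + prevEnd);
        }
        prevEnd = r.end;
      }
      if (!prevEnd.isEmpty()) {
        System.out.println("hole: last region ends at " + prevEnd + " instead of the table end");
      }
    }

    public static void main(String[] args) {
      List<Region> regions = new ArrayList<>();
      regions.add(new Region("", "b"));
      regions.add(new Region("b", "f"));
      regions.add(new Region("g", ""));  // the gap between "f" and "g" is the hole to report
      check(regions);
    }
  }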

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index eb36d53..b78cf5f 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4359">HBaseFsck.WorkItemHdfsDir</a>
+<pre>class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4404">HBaseFsck.WorkItemHdfsDir</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 <div class="block">Contact hdfs and get all information about specified table directory into
@@ -218,7 +218,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>tableDir</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FileStatus <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4360">tableDir</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.FileStatus <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4405">tableDir</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -227,7 +227,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4361">errors</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4406">errors</a></pre>
 </li>
 </ul>
 <a name="fs">
@@ -236,7 +236,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>fs</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FileSystem <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4362">fs</a></pre>
+<pre>private&nbsp;org.apache.hadoop.fs.FileSystem <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4407">fs</a></pre>
 </li>
 </ul>
 </li>
@@ -253,7 +253,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemHdfsDir</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4364">WorkItemHdfsDir</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4409">WorkItemHdfsDir</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                 <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors,
                 org.apache.hadoop.fs.FileStatus&nbsp;status)</pre>
 </li>
@@ -272,7 +272,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4372">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html#line.4417">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true" title="class or interface in java.lang">InterruptedException</a>,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true" title="class or interface in java.util.concurrent">ExecutionException</a></pre>
 <dl>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index 81541f9..1a11c40 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4479">HBaseFsck.WorkItemHdfsRegionInfo</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4524">HBaseFsck.WorkItemHdfsRegionInfo</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 <div class="block">Contact hdfs and get all information about specified table directory into
@@ -218,7 +218,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>hbi</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4480">hbi</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4525">hbi</a></pre>
 </li>
 </ul>
 <a name="hbck">
@@ -227,7 +227,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>hbck</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4481">hbck</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4526">hbck</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -236,7 +236,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4482">errors</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4527">errors</a></pre>
 </li>
 </ul>
 </li>
@@ -253,7 +253,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemHdfsRegionInfo</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4484">WorkItemHdfsRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4529">WorkItemHdfsRegionInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;hbi,
                        <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a>&nbsp;hbck,
                        <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors)</pre>
 </li>
@@ -272,7 +272,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4491">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html#line.4536">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index c3990e8..3aa1909 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2779">HBaseFsck.WorkItemOverlapMerge</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.2824">HBaseFsck.WorkItemOverlapMerge</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 </li>
@@ -211,7 +211,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>handler</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2780">handler</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2825">handler</a></pre>
 </li>
 </ul>
 <a name="overlapgroup">
@@ -220,7 +220,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>overlapgroup</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2781">overlapgroup</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2826">overlapgroup</a></pre>
 </li>
 </ul>
 </li>
@@ -237,7 +237,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemOverlapMerge</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2783">WorkItemOverlapMerge</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlapgroup,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2828">WorkItemOverlapMerge</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true" title="class or interface in java.util">Collection</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&gt;&nbsp;overlapgroup,
                      <a href="../../../../../org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.html" title="interface in org.apache.hadoop.hbase.util.hbck">TableIntegrityErrorHandler</a>&nbsp;handler)</pre>
 </li>
 </ul>
@@ -255,7 +255,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2789">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html#line.2834">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index 59d5325..ffea861 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4294">HBaseFsck.WorkItemRegion</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4339">HBaseFsck.WorkItemRegion</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&gt;</pre>
 <div class="block">Contact a region server and get all information from it</div>
@@ -226,7 +226,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>hbck</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4295">hbck</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4340">hbck</a></pre>
 </li>
 </ul>
 <a name="rsinfo">
@@ -235,7 +235,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>rsinfo</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4296">rsinfo</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4341">rsinfo</a></pre>
 </li>
 </ul>
 <a name="errors">
@@ -244,7 +244,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>errors</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4297">errors</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4342">errors</a></pre>
 </li>
 </ul>
 <a name="connection">
@@ -253,7 +253,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>connection</h4>
-<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4298">connection</a></pre>
+<pre>private final&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4343">connection</a></pre>
 </li>
 </ul>
 </li>
@@ -270,7 +270,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>WorkItemRegion</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4300">WorkItemRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a>&nbsp;hbck,
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4345">WorkItemRegion</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck</a>&nbsp;hbck,
                <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;info,
                <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a>&nbsp;errors,
                <a href="../../../../../org/apache/hadoop/hbase/client/ClusterConnection.html" title="interface in org.apache.hadoop.hbase.client">ClusterConnection</a>&nbsp;connection)</pre>
@@ -290,7 +290,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockList">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4309">call</a>()
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true" title="class or interface in java.lang">Void</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4354">call</a>()
           throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -306,7 +306,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>filterRegions</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4343">filterRegions</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;regions)</pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html#line.4388">filterRegions</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&gt;&nbsp;regions)</pre>
 </li>
 </ul>
 </li>
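
HBaseFsck.WorkItemRegion, documented in the hunk above, is a Callable&lt;Void&gt; that contacts one region server and gathers its region information; HBaseFsck submits one such work item per server to a bounded executor (up to 50 threads, per MAX_NUM_THREADS later in this diff) and waits for all of them. The following is only a minimal, self-contained Java sketch of that fan-out pattern, with a made-up per-server task standing in for the real region-server RPC:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RegionServerFanOut {
  // Hypothetical stand-in for the per-server work a WorkItemRegion performs.
  static Callable<Void> workItemFor(final String serverName) {
    return new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // The real tool would contact the region server, collect its online
        // regions, filter out system regions, and report inconsistencies.
        System.out.println("checked " + serverName);
        return null;
      }
    };
  }

  public static void main(String[] args) throws Exception {
    List<String> servers = Arrays.asList("rs1.example.com,16020", "rs2.example.com,16020");
    ExecutorService executor = Executors.newFixedThreadPool(Math.min(servers.size(), 50));
    try {
      List<Future<Void>> futures = new ArrayList<>();
      for (String server : servers) {
        futures.add(executor.submit(workItemFor(server)));
      }
      for (Future<Void> f : futures) {
        f.get(); // propagates any per-server failure to the coordinating thread
      }
    } finally {
      executor.shutdown();
    }
  }
}

Collecting the Futures and calling get() on each is what surfaces a per-server failure back on the coordinating thread, which is the point of modelling each server check as a Callable work item.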


[29/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
index 57e04f3..7edfec9 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
@@ -1928,36 +1928,43 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.</div>
 </tr>
 <tbody>
 <tr class="altColor">
+<td class="colFirst"><code>static <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;org.apache.hadoop.fs.Path,org.apache.hadoop.fs.FSDataOutputStream&gt;</code></td>
+<td class="colLast"><span class="typeNameLabel">HBaseFsck.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#checkAndMarkRunningHbck-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">checkAndMarkRunningHbck</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+                       <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>
+<div class="block">This method maintains a lock using a file.</div>
+</td>
+</tr>
+<tr class="rowColor">
 <td class="colFirst"><code>private <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>&gt;</code></td>
 <td class="colLast"><span class="typeNameLabel">MunkresAssignment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/MunkresAssignment.html#findUncoveredZero--">findUncoveredZero</a></span>()</code>
 <div class="block">Find a zero cost assignment which is not covered.</div>
 </td>
 </tr>
-<tr class="rowColor">
+<tr class="altColor">
 <td class="colFirst"><code>private static <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path&gt;</code></td>
 <td class="colLast"><span class="typeNameLabel">RegionSplitter.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/RegionSplitter.html#getTableDirAndSplitFile-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.TableName-">getTableDirAndSplitFile</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
                        <a href="../../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;tableName)</code>&nbsp;</td>
 </tr>
-<tr class="altColor">
+<tr class="rowColor">
 <td class="colFirst"><code>static &lt;T1,T2&gt;&nbsp;<a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;T1,T2&gt;</code></td>
 <td class="colLast"><span class="typeNameLabel">Pair.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html#newPair-T1-T2-">newPair</a></span>(T1&nbsp;a,
        T2&nbsp;b)</code>
 <div class="block">Constructs a new pair, inferring the type via the passed arguments</div>
 </td>
 </tr>
-<tr class="rowColor">
+<tr class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>&gt;</code></td>
 <td class="colLast"><span class="typeNameLabel">MunkresAssignment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/MunkresAssignment.html#primeInRow-int-">primeInRow</a></span>(int&nbsp;r)</code>
 <div class="block">Find a primed zero in the specified row.</div>
 </td>
 </tr>
-<tr class="altColor">
+<tr class="rowColor">
 <td class="colFirst"><code>private <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>&gt;</code></td>
 <td class="colLast"><span class="typeNameLabel">MunkresAssignment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/MunkresAssignment.html#starInCol-int-">starInCol</a></span>(int&nbsp;c)</code>
 <div class="block">Find a starred zero in the specified column.</div>
 </td>
 </tr>
-<tr class="rowColor">
+<tr class="altColor">
 <td class="colFirst"><code>private <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>,<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true" title="class or interface in java.lang">Integer</a>&gt;</code></td>
 <td class="colLast"><span class="typeNameLabel">MunkresAssignment.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/MunkresAssignment.html#starInRow-int-">starInRow</a></span>(int&nbsp;r)</code>
 <div class="block">Find a starred zero in a specified row.</div>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounter.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounter.html b/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounter.html
index e4bb74d..98e3560 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounter.html
@@ -125,6 +125,22 @@
 </tr>
 </tbody>
 </table>
+<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
+<caption><span>Methods in <a href="../../../../../../org/apache/hadoop/hbase/util/package-summary.html">org.apache.hadoop.hbase.util</a> with parameters of type <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a></span><span class="tabEnd">&nbsp;</span></caption>
+<tr>
+<th class="colFirst" scope="col">Modifier and Type</th>
+<th class="colLast" scope="col">Method and Description</th>
+</tr>
+<tbody>
+<tr class="altColor">
+<td class="colFirst"><code>static <a href="../../../../../../org/apache/hadoop/hbase/util/Pair.html" title="class in org.apache.hadoop.hbase.util">Pair</a>&lt;org.apache.hadoop.fs.Path,org.apache.hadoop.fs.FSDataOutputStream&gt;</code></td>
+<td class="colLast"><span class="typeNameLabel">HBaseFsck.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#checkAndMarkRunningHbck-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">checkAndMarkRunningHbck</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+                       <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>
+<div class="block">This method maintains a lock using a file.</div>
+</td>
+</tr>
+</tbody>
+</table>
 <table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing constructors, and an explanation">
 <caption><span>Constructors in <a href="../../../../../../org/apache/hadoop/hbase/util/package-summary.html">org.apache.hadoop.hbase.util</a> with parameters of type <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a></span><span class="tabEnd">&nbsp;</span></caption>
 <tr>
@@ -132,7 +148,8 @@
 </tr>
 <tbody>
 <tr class="altColor">
-<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#FileLockCallable-org.apache.hadoop.hbase.util.RetryCounter-">FileLockCallable</a></span>(<a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>&nbsp;</td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#FileLockCallable-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">FileLockCallable</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+                <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>&nbsp;</td>
 </tr>
 </tbody>
 </table>
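
The checkAndMarkRunningHbck(Configuration, RetryCounter) method added in the hunk above returns a Pair of the lock Path and the open FSDataOutputStream, and is described as maintaining a lock using a file. The snippet below is not the HBaseFsck implementation; it is a hedged Java sketch of the underlying idea, creating a marker file with overwrite disabled and retrying a bounded number of times (the 5-attempt / 200 ms figures echo the DEFAULT_MAX_LOCK_FILE_ATTEMPTS and DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL constants visible elsewhere in this diff, and the helper names are invented):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileLockSketch {
  // Acquire an exclusive marker file, retrying a few times before giving up.
  static FSDataOutputStream acquireLock(Configuration conf, Path lockFile,
      int maxAttempts, long sleepMillis) throws IOException, InterruptedException {
    FileSystem fs = lockFile.getFileSystem(conf);
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        // overwrite=false: creation fails if another process already holds the lock
        return fs.create(lockFile, false);
      } catch (IOException e) {
        if (attempt == maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMillis);
      }
    }
    throw new IOException("lock not acquired after " + maxAttempts + " attempts");
  }

  static void releaseLock(Configuration conf, Path lockFile,
      FSDataOutputStream stream) throws IOException {
    stream.close();
    lockFile.getFileSystem(conf).delete(lockFile, false);
  }
}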

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounterFactory.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounterFactory.html b/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounterFactory.html
index c9934a2..4988137 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounterFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/RetryCounterFactory.html
@@ -116,6 +116,23 @@
 </tr>
 </tbody>
 </table>
+<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
+<caption><span>Methods in <a href="../../../../../../org/apache/hadoop/hbase/util/package-summary.html">org.apache.hadoop.hbase.util</a> that return <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a></span><span class="tabEnd">&nbsp;</span></caption>
+<tr>
+<th class="colFirst" scope="col">Modifier and Type</th>
+<th class="colLast" scope="col">Method and Description</th>
+</tr>
+<tbody>
+<tr class="altColor">
+<td class="colFirst"><code>static <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a></code></td>
+<td class="colLast"><span class="typeNameLabel">HBaseFsck.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createLockRetryCounterFactory-org.apache.hadoop.conf.Configuration-">createLockRetryCounterFactory</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
+</tr>
+<tr class="rowColor">
+<td class="colFirst"><code>private static <a href="../../../../../../org/apache/hadoop/hbase/util/RetryCounterFactory.html" title="class in org.apache.hadoop.hbase.util">RetryCounterFactory</a></code></td>
+<td class="colLast"><span class="typeNameLabel">HBaseFsck.</span><code><span class="memberNameLink"><a href="../../../../../../org/apache/hadoop/hbase/util/HBaseFsck.html#createZnodeRetryCounterFactory-org.apache.hadoop.conf.Configuration-">createZnodeRetryCounterFactory</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf)</code>&nbsp;</td>
+</tr>
+</tbody>
+</table>
 </li>
 <li class="blockList"><a name="org.apache.hadoop.hbase.zookeeper">
 <!--   -->
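
createLockRetryCounterFactory(Configuration) and createZnodeRetryCounterFactory(Configuration), listed above, derive the retry policy for the lock-file and znode paths from configuration. A small illustrative sketch of reading such settings is shown below; the property keys are hypothetical, while the defaults (5 attempts, 200 ms sleep, 5000 ms max sleep) match the DEFAULT_* constants that appear later in this diff:

import org.apache.hadoop.conf.Configuration;

public class RetrySettingsSketch {
  final int maxAttempts;
  final int sleepIntervalMillis;
  final int maxSleepMillis;

  RetrySettingsSketch(int maxAttempts, int sleepIntervalMillis, int maxSleepMillis) {
    this.maxAttempts = maxAttempts;
    this.sleepIntervalMillis = sleepIntervalMillis;
    this.maxSleepMillis = maxSleepMillis;
  }

  // Property names are illustrative only; defaults mirror the lock-file
  // constants shown in the HBaseFsck source hunk below.
  static RetrySettingsSketch forLockFile(Configuration conf) {
    return new RetrySettingsSketch(
        conf.getInt("example.hbck.lockfile.attempts", 5),
        conf.getInt("example.hbck.lockfile.attempt.sleep.interval", 200),
        conf.getInt("example.hbck.lockfile.attempt.maxsleeptime", 5000));
  }
}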

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-frame.html b/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
index ac55ac3..13c20a2 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
@@ -120,6 +120,7 @@
 <li><a href="HasThread.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HasThread</a></li>
 <li><a href="HBaseConfTool.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseConfTool</a></li>
 <li><a href="HBaseFsck.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck</a></li>
+<li><a href="HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.FileLockCallable</a></li>
 <li><a href="HBaseFsck.HBaseFsckTool.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.HBaseFsckTool</a></li>
 <li><a href="HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.HbckInfo</a></li>
 <li><a href="HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util" target="classFrame">HBaseFsck.HdfsEntry</a></li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-summary.html b/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
index 9f425bd..6065723 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
@@ -707,6 +707,10 @@
  table integrity problems in a corrupted HBase.</div>
 </td>
 </tr>
+<tr class="rowColor">
+<td class="colFirst"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.FileLockCallable</a></td>
+<td class="colLast">&nbsp;</td>
+</tr>
 <tr class="altColor">
 <td class="colFirst"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HBaseFsckTool</a></td>
 <td class="colLast">

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index d611005..f4aa0db 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -515,14 +515,14 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/ChecksumType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">ChecksumType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PrettyPrinter.Unit.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PrettyPrinter.Unit</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Bytes.LexicographicalComparerHolder.PureJavaComparer</span></a> (implements org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.Comparer.html" title="interface in org.apache.hadoop.hbase.util">Bytes.Comparer</a>&lt;T&gt;)</li>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/IdReadWriteLock.ReferenceType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">IdReadWriteLock.ReferenceType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Bytes.LexicographicalComparerHolder.UnsafeComparer</span></a> (implements org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Bytes.Comparer.html" title="interface in org.apache.hadoop.hbase.util">Bytes.Comparer</a>&lt;T&gt;)</li>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/ChecksumType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">ChecksumType</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/IdReadWriteLock.ReferenceType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">IdReadWriteLock.ReferenceType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PoolMap.PoolType.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PoolMap.PoolType</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/Order.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">Order</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">HBaseFsck.ErrorReporter.ERROR_CODE</span></a></li>
-<li type="circle">org.apache.hadoop.hbase.util.<a href="../../../../../org/apache/hadoop/hbase/util/PrettyPrinter.Unit.html" title="enum in org.apache.hadoop.hbase.util"><span class="typeNameLink">PrettyPrinter.Unit</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index bcb1724..c6caa9b 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -191,8 +191,8 @@
 <ul>
 <li type="circle">java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true" title="class or interface in java.lang"><span class="typeNameLink">Enum</span></a>&lt;E&gt; (implements java.lang.<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true" title="class or interface in java.lang">Comparable</a>&lt;T&gt;, java.io.<a href="https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a>)
 <ul>
-<li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">RegionGroupingProvider.Strategies</span></a></li>
 <li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/WALFactory.Providers.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">WALFactory.Providers</span></a></li>
+<li type="circle">org.apache.hadoop.hbase.wal.<a href="../../../../../org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html" title="enum in org.apache.hadoop.hbase.wal"><span class="typeNameLink">RegionGroupingProvider.Strategies</span></a></li>
 </ul>
 </li>
 </ul>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 445ef00..00a1fac 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 <span class="sourceLineNo">008</span>@InterfaceAudience.Private<a name="line.8"></a>
 <span class="sourceLineNo">009</span>public class Version {<a name="line.9"></a>
 <span class="sourceLineNo">010</span>  public static final String version = "3.0.0-SNAPSHOT";<a name="line.10"></a>
-<span class="sourceLineNo">011</span>  public static final String revision = "a452487a9b82bfd33bc10683c3f8b8ae74d58883";<a name="line.11"></a>
+<span class="sourceLineNo">011</span>  public static final String revision = "409e742ac3bdbff027b136a87339f4f5511da07d";<a name="line.11"></a>
 <span class="sourceLineNo">012</span>  public static final String user = "jenkins";<a name="line.12"></a>
-<span class="sourceLineNo">013</span>  public static final String date = "Fri Aug 24 14:38:46 UTC 2018";<a name="line.13"></a>
+<span class="sourceLineNo">013</span>  public static final String date = "Sat Aug 25 14:39:06 UTC 2018";<a name="line.13"></a>
 <span class="sourceLineNo">014</span>  public static final String url = "git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";<a name="line.14"></a>
-<span class="sourceLineNo">015</span>  public static final String srcChecksum = "6a771691f343c60ea56a144f9db58ab5";<a name="line.15"></a>
+<span class="sourceLineNo">015</span>  public static final String srcChecksum = "574f44946cbfd790ca8dd655ada11008";<a name="line.15"></a>
 <span class="sourceLineNo">016</span>}<a name="line.16"></a>
 
 


[16/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      Pair&lt;Path, FSD

<TRUNCATED>
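
The (truncated) hunk above shows the hbck1 lock-file pattern: the exclusive create of hbase-hbck.lock is retried inside a Callable, and the caller bounds the total wait with a FutureTask timeout read from hbase.hbck.lockfile.maxwaittime. Below is a rough, self-contained sketch of the same pattern; it uses plain java.nio instead of the HDFS FileSystem/FSUtils calls, the retry limits are made up for illustration, and it is not the HBase implementation itself.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.FutureTask;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class LockFileSketch {

      // Retry the exclusive create a fixed number of times, sleeping between attempts
      // (a plain loop stands in for HBase's RetryCounter).
      static Path createWithRetries(Path lockFile, int maxAttempts, long sleepMillis)
          throws IOException, InterruptedException {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return Files.createFile(lockFile); // fails if the file already exists
          } catch (IOException ioe) {
            last = ioe;
            Thread.sleep(sleepMillis);
          }
        }
        throw last;
      }

      // Run the create in a single-thread executor and bound the total wait with a
      // FutureTask timeout, the same shape as checkAndMarkRunningHbck in the hunk above.
      static Path checkAndMark(Path lockFile, long timeoutSeconds) {
        ExecutorService executor = Executors.newFixedThreadPool(1);
        FutureTask<Path> task = new FutureTask<>(() -> createWithRetries(lockFile, 5, 200));
        executor.execute(task);
        try {
          return task.get(timeoutSeconds, TimeUnit.SECONDS);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return null;
        } catch (ExecutionException | TimeoutException e) {
          task.cancel(true); // give up; caller treats null as "lock not acquired"
          return null;
        } finally {
          executor.shutdownNow();
        }
      }

      public static void main(String[] args) {
        Path lock = Paths.get(System.getProperty("java.io.tmpdir"), "sketch-hbck.lock");
        System.out.println("lock acquired at: " + checkAndMark(lock, 10));
      }
    }

Run twice without removing the file, the second call exhausts its retries and comes back null, which is how an exclusive hbck run keeps a concurrent instance from making changes.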

[11/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+

<TRUNCATED>

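The HBaseFsck diff above implements a single-instance guard: a FileLockCallable creates a lock file under the HBase tmp directory with a bounded number of retries, and checkAndMarkRunningHbck() drives that callable through a FutureTask so the overall wait can be capped by hbase.hbck.lockfile.maxwaittime. The sketch below restates that two-layer pattern (inner retry loop, outer timeout) using only JDK classes; it is a minimal illustration under stated assumptions, not HBase code, and every name in it (SimpleFileLock, tryAcquire, createWithRetries, the three constants, the tmp-dir lock path) is invented for the example.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Minimal sketch of the lock-file pattern shown in the HBaseFsck diff above.
 * Everything here is hypothetical example code, not an HBase API.
 */
public class SimpleFileLock {

  // Rough stand-ins for hbase.hbck.lockfile.attempts / attempt.sleep.interval / maxwaittime.
  private static final int MAX_ATTEMPTS = 5;
  private static final long SLEEP_BETWEEN_ATTEMPTS_MS = 200;
  private static final int WAIT_TIMEOUT_SECONDS = 30;

  /** Inner retry loop, analogous to FileLockCallable.createFileWithRetries(). */
  private static Path createWithRetries(Path lockPath) throws IOException, InterruptedException {
    IOException last = null;
    for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
      try {
        // CREATE_NEW fails if the file already exists, i.e. another instance holds the lock.
        return Files.write(lockPath, "lock".getBytes(), StandardOpenOption.CREATE_NEW);
      } catch (IOException ioe) {
        last = ioe;                       // remember the failure and retry after a short sleep
        Thread.sleep(SLEEP_BETWEEN_ATTEMPTS_MS);
      }
    }
    throw last;                           // out of attempts: surface the last IOException
  }

  /** Outer timeout, analogous to checkAndMarkRunningHbck(): bound the whole acquisition. */
  public static Path tryAcquire(Path lockPath) {
    Callable<Path> callable = () -> createWithRetries(lockPath);
    FutureTask<Path> task = new FutureTask<>(callable);
    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(task);
    try {
      return task.get(WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException te) {
      task.cancel(true);                  // took too long to obtain the lock; give up
      return null;
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt(); // preserve interrupt status, report "not acquired"
      return null;
    } catch (ExecutionException ee) {
      return null;                        // retries exhausted inside the callable
    } finally {
      executor.shutdownNow();
    }
  }

  public static void main(String[] args) {
    Path lock = Paths.get(System.getProperty("java.io.tmpdir"), "example-hbck.lock");
    Path held = tryAcquire(lock);
    System.out.println(held == null
        ? "could not acquire " + lock + " (another instance may hold it)"
        : "acquired " + held);
  }
}

The same split is what lets the real tool distinguish three outcomes in the diff above: another hbck already holding the lock (AlreadyBeingCreatedException inside a RemoteException, callable returns null), transient filesystem errors (retried by the inner loop), and a hung filesystem call (outer timeout fires and the FutureTask is cancelled).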
[28/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 7df71bd..a990153 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -201,3634 +201,3643 @@
 <span class="sourceLineNo">193</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.193"></a>
 <span class="sourceLineNo">194</span>import org.apache.hadoop.hbase.util.CompressionTest;<a name="line.194"></a>
 <span class="sourceLineNo">195</span>import org.apache.hadoop.hbase.util.EncryptionTest;<a name="line.195"></a>
-<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.196"></a>
-<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.197"></a>
-<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.199"></a>
-<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.200"></a>
-<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.201"></a>
-<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.203"></a>
-<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.204"></a>
-<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.206"></a>
-<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.210"></a>
-<span class="sourceLineNo">211</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.211"></a>
-<span class="sourceLineNo">212</span>import org.apache.zookeeper.KeeperException;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>import org.eclipse.jetty.server.Server;<a name="line.213"></a>
-<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.215"></a>
-<span class="sourceLineNo">216</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>import org.slf4j.Logger;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>import org.slf4j.LoggerFactory;<a name="line.218"></a>
-<span class="sourceLineNo">219</span><a name="line.219"></a>
-<span class="sourceLineNo">220</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.222"></a>
-<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.223"></a>
-<span class="sourceLineNo">224</span><a name="line.224"></a>
-<span class="sourceLineNo">225</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.226"></a>
-<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.227"></a>
-<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.230"></a>
-<span class="sourceLineNo">231</span><a name="line.231"></a>
-<span class="sourceLineNo">232</span>/**<a name="line.232"></a>
-<span class="sourceLineNo">233</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.233"></a>
-<span class="sourceLineNo">234</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.234"></a>
-<span class="sourceLineNo">235</span> * run the cluster.  All others park themselves in their constructor until<a name="line.235"></a>
-<span class="sourceLineNo">236</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.236"></a>
-<span class="sourceLineNo">237</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.237"></a>
-<span class="sourceLineNo">238</span> *<a name="line.238"></a>
-<span class="sourceLineNo">239</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.239"></a>
-<span class="sourceLineNo">240</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.240"></a>
-<span class="sourceLineNo">241</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.241"></a>
-<span class="sourceLineNo">242</span> *<a name="line.242"></a>
-<span class="sourceLineNo">243</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.243"></a>
-<span class="sourceLineNo">244</span> *<a name="line.244"></a>
-<span class="sourceLineNo">245</span> * @see org.apache.zookeeper.Watcher<a name="line.245"></a>
-<span class="sourceLineNo">246</span> */<a name="line.246"></a>
-<span class="sourceLineNo">247</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.247"></a>
-<span class="sourceLineNo">248</span>@SuppressWarnings("deprecation")<a name="line.248"></a>
-<span class="sourceLineNo">249</span>public class HMaster extends HRegionServer implements MasterServices {<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private static Logger LOG = LoggerFactory.getLogger(HMaster.class);<a name="line.250"></a>
-<span class="sourceLineNo">251</span><a name="line.251"></a>
-<span class="sourceLineNo">252</span>  /**<a name="line.252"></a>
-<span class="sourceLineNo">253</span>   * Protection against zombie master. Started once Master accepts active responsibility and<a name="line.253"></a>
-<span class="sourceLineNo">254</span>   * starts taking over responsibilities. Allows a finite time window before giving up ownership.<a name="line.254"></a>
-<span class="sourceLineNo">255</span>   */<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private static class InitializationMonitor extends HasThread {<a name="line.256"></a>
-<span class="sourceLineNo">257</span>    /** The amount of time in milliseconds to sleep before checking initialization status. */<a name="line.257"></a>
-<span class="sourceLineNo">258</span>    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";<a name="line.258"></a>
-<span class="sourceLineNo">259</span>    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);<a name="line.259"></a>
-<span class="sourceLineNo">260</span><a name="line.260"></a>
-<span class="sourceLineNo">261</span>    /**<a name="line.261"></a>
-<span class="sourceLineNo">262</span>     * When timeout expired and initialization has not complete, call {@link System#exit(int)} when<a name="line.262"></a>
-<span class="sourceLineNo">263</span>     * true, do nothing otherwise.<a name="line.263"></a>
-<span class="sourceLineNo">264</span>     */<a name="line.264"></a>
-<span class="sourceLineNo">265</span>    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";<a name="line.265"></a>
-<span class="sourceLineNo">266</span>    public static final boolean HALT_DEFAULT = false;<a name="line.266"></a>
-<span class="sourceLineNo">267</span><a name="line.267"></a>
-<span class="sourceLineNo">268</span>    private final HMaster master;<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    private final long timeout;<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    private final boolean haltOnTimeout;<a name="line.270"></a>
-<span class="sourceLineNo">271</span><a name="line.271"></a>
-<span class="sourceLineNo">272</span>    /** Creates a Thread that monitors the {@link #isInitialized()} state. */<a name="line.272"></a>
-<span class="sourceLineNo">273</span>    InitializationMonitor(HMaster master) {<a name="line.273"></a>
-<span class="sourceLineNo">274</span>      super("MasterInitializationMonitor");<a name="line.274"></a>
-<span class="sourceLineNo">275</span>      this.master = master;<a name="line.275"></a>
-<span class="sourceLineNo">276</span>      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);<a name="line.276"></a>
-<span class="sourceLineNo">277</span>      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);<a name="line.277"></a>
-<span class="sourceLineNo">278</span>      this.setDaemon(true);<a name="line.278"></a>
-<span class="sourceLineNo">279</span>    }<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>    @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>    public void run() {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>      try {<a name="line.283"></a>
-<span class="sourceLineNo">284</span>        while (!master.isStopped() &amp;&amp; master.isActiveMaster()) {<a name="line.284"></a>
-<span class="sourceLineNo">285</span>          Thread.sleep(timeout);<a name="line.285"></a>
-<span class="sourceLineNo">286</span>          if (master.isInitialized()) {<a name="line.286"></a>
-<span class="sourceLineNo">287</span>            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");<a name="line.287"></a>
-<span class="sourceLineNo">288</span>          } else {<a name="line.288"></a>
-<span class="sourceLineNo">289</span>            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"<a name="line.289"></a>
-<span class="sourceLineNo">290</span>                + " consider submitting a bug report including a thread dump of this process.");<a name="line.290"></a>
-<span class="sourceLineNo">291</span>            if (haltOnTimeout) {<a name="line.291"></a>
-<span class="sourceLineNo">292</span>              LOG.error("Zombie Master exiting. Thread dump to stdout");<a name="line.292"></a>
-<span class="sourceLineNo">293</span>              Threads.printThreadInfo(System.out, "Zombie HMaster");<a name="line.293"></a>
-<span class="sourceLineNo">294</span>              System.exit(-1);<a name="line.294"></a>
-<span class="sourceLineNo">295</span>            }<a name="line.295"></a>
-<span class="sourceLineNo">296</span>          }<a name="line.296"></a>
-<span class="sourceLineNo">297</span>        }<a name="line.297"></a>
-<span class="sourceLineNo">298</span>      } catch (InterruptedException ie) {<a name="line.298"></a>
-<span class="sourceLineNo">299</span>        LOG.trace("InitMonitor thread interrupted. Existing.");<a name="line.299"></a>
-<span class="sourceLineNo">300</span>      }<a name="line.300"></a>
-<span class="sourceLineNo">301</span>    }<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  }<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  // MASTER is name of the webapp and the attribute name used stuffing this<a name="line.304"></a>
-<span class="sourceLineNo">305</span>  //instance into web context.<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  public static final String MASTER = "master";<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  // Manager and zk listener for master election<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private final ActiveMasterManager activeMasterManager;<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Region server tracker<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private RegionServerTracker regionServerTracker;<a name="line.311"></a>
-<span class="sourceLineNo">312</span>  // Draining region server tracker<a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private DrainingServerTracker drainingServerTracker;<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  // Tracker for load balancer state<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  LoadBalancerTracker loadBalancerTracker;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  // Tracker for meta location, if any client ZK quorum specified<a name="line.316"></a>
-<span class="sourceLineNo">317</span>  MetaLocationSyncer metaLocationSyncer;<a name="line.317"></a>
-<span class="sourceLineNo">318</span>  // Tracker for active master location, if any client ZK quorum specified<a name="line.318"></a>
-<span class="sourceLineNo">319</span>  MasterAddressSyncer masterAddressSyncer;<a name="line.319"></a>
-<span class="sourceLineNo">320</span><a name="line.320"></a>
-<span class="sourceLineNo">321</span>  // Tracker for split and merge state<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private SplitOrMergeTracker splitOrMergeTracker;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  // Tracker for region normalizer state<a name="line.324"></a>
-<span class="sourceLineNo">325</span>  private RegionNormalizerTracker regionNormalizerTracker;<a name="line.325"></a>
-<span class="sourceLineNo">326</span><a name="line.326"></a>
-<span class="sourceLineNo">327</span>  //Tracker for master maintenance mode setting<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private MasterMaintenanceModeTracker maintenanceModeTracker;<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private ClusterSchemaService clusterSchemaService;<a name="line.330"></a>
-<span class="sourceLineNo">331</span><a name="line.331"></a>
-<span class="sourceLineNo">332</span>  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    "hbase.master.wait.on.service.seconds";<a name="line.333"></a>
-<span class="sourceLineNo">334</span>  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;<a name="line.334"></a>
-<span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  // Metrics for the HMaster<a name="line.336"></a>
-<span class="sourceLineNo">337</span>  final MetricsMaster metricsMaster;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  // file system manager for the master FS operations<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private MasterFileSystem fileSystemManager;<a name="line.339"></a>
-<span class="sourceLineNo">340</span>  private MasterWalManager walManager;<a name="line.340"></a>
-<span class="sourceLineNo">341</span><a name="line.341"></a>
-<span class="sourceLineNo">342</span>  // server manager to deal with region server info<a name="line.342"></a>
-<span class="sourceLineNo">343</span>  private volatile ServerManager serverManager;<a name="line.343"></a>
-<span class="sourceLineNo">344</span><a name="line.344"></a>
-<span class="sourceLineNo">345</span>  // manager of assignment nodes in zookeeper<a name="line.345"></a>
-<span class="sourceLineNo">346</span>  private AssignmentManager assignmentManager;<a name="line.346"></a>
-<span class="sourceLineNo">347</span><a name="line.347"></a>
-<span class="sourceLineNo">348</span>  // manager of replication<a name="line.348"></a>
-<span class="sourceLineNo">349</span>  private ReplicationPeerManager replicationPeerManager;<a name="line.349"></a>
-<span class="sourceLineNo">350</span><a name="line.350"></a>
-<span class="sourceLineNo">351</span>  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;<a name="line.351"></a>
-<span class="sourceLineNo">352</span><a name="line.352"></a>
-<span class="sourceLineNo">353</span>  // buffer for "fatal error" notices from region servers<a name="line.353"></a>
-<span class="sourceLineNo">354</span>  // in the cluster. This is only used for assisting<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  // operations/debugging.<a name="line.355"></a>
-<span class="sourceLineNo">356</span>  MemoryBoundedLogMessageBuffer rsFatals;<a name="line.356"></a>
-<span class="sourceLineNo">357</span><a name="line.357"></a>
-<span class="sourceLineNo">358</span>  // flag set after we become the active master (used for testing)<a name="line.358"></a>
-<span class="sourceLineNo">359</span>  private volatile boolean activeMaster = false;<a name="line.359"></a>
-<span class="sourceLineNo">360</span><a name="line.360"></a>
-<span class="sourceLineNo">361</span>  // flag set after we complete initialization once active<a name="line.361"></a>
-<span class="sourceLineNo">362</span>  private final ProcedureEvent&lt;?&gt; initialized = new ProcedureEvent&lt;&gt;("master initialized");<a name="line.362"></a>
-<span class="sourceLineNo">363</span><a name="line.363"></a>
-<span class="sourceLineNo">364</span>  // flag set after master services are started,<a name="line.364"></a>
-<span class="sourceLineNo">365</span>  // initialization may have not completed yet.<a name="line.365"></a>
-<span class="sourceLineNo">366</span>  volatile boolean serviceStarted = false;<a name="line.366"></a>
-<span class="sourceLineNo">367</span><a name="line.367"></a>
-<span class="sourceLineNo">368</span>  // Maximum time we should run balancer for<a name="line.368"></a>
-<span class="sourceLineNo">369</span>  private final int maxBlancingTime;<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  // Maximum percent of regions in transition when balancing<a name="line.370"></a>
-<span class="sourceLineNo">371</span>  private final double maxRitPercent;<a name="line.371"></a>
-<span class="sourceLineNo">372</span><a name="line.372"></a>
-<span class="sourceLineNo">373</span>  private final LockManager lockManager = new LockManager(this);<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>  private LoadBalancer balancer;<a name="line.375"></a>
-<span class="sourceLineNo">376</span>  private RegionNormalizer normalizer;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>  private BalancerChore balancerChore;<a name="line.377"></a>
-<span class="sourceLineNo">378</span>  private RegionNormalizerChore normalizerChore;<a name="line.378"></a>
-<span class="sourceLineNo">379</span>  private ClusterStatusChore clusterStatusChore;<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  private ClusterStatusPublisher clusterStatusPublisherChore = null;<a name="line.380"></a>
-<span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  CatalogJanitor catalogJanitorChore;<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  private LogCleaner logCleaner;<a name="line.383"></a>
-<span class="sourceLineNo">384</span>  private HFileCleaner hfileCleaner;<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  private ReplicationBarrierCleaner replicationBarrierCleaner;<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  private MobCompactionChore mobCompactChore;<a name="line.387"></a>
-<span class="sourceLineNo">388</span>  private MasterMobCompactionThread mobCompactThread;<a name="line.388"></a>
-<span class="sourceLineNo">389</span>  // used to synchronize the mobCompactionStates<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  private final IdLock mobCompactionLock = new IdLock();<a name="line.390"></a>
-<span class="sourceLineNo">391</span>  // save the information of mob compactions in tables.<a name="line.391"></a>
-<span class="sourceLineNo">392</span>  // the key is table name, the value is the number of compactions in that table.<a name="line.392"></a>
-<span class="sourceLineNo">393</span>  private Map&lt;TableName, AtomicInteger&gt; mobCompactionStates = Maps.newConcurrentMap();<a name="line.393"></a>
-<span class="sourceLineNo">394</span><a name="line.394"></a>
-<span class="sourceLineNo">395</span>  MasterCoprocessorHost cpHost;<a name="line.395"></a>
-<span class="sourceLineNo">396</span><a name="line.396"></a>
-<span class="sourceLineNo">397</span>  private final boolean preLoadTableDescriptors;<a name="line.397"></a>
-<span class="sourceLineNo">398</span><a name="line.398"></a>
-<span class="sourceLineNo">399</span>  // Time stamps for when a hmaster became active<a name="line.399"></a>
-<span class="sourceLineNo">400</span>  private long masterActiveTime;<a name="line.400"></a>
-<span class="sourceLineNo">401</span><a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // Time stamp for when HMaster finishes becoming Active Master<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  private long masterFinishedInitializationTime;<a name="line.403"></a>
-<span class="sourceLineNo">404</span><a name="line.404"></a>
-<span class="sourceLineNo">405</span>  //should we check the compression codec type at master side, default true, HBASE-6370<a name="line.405"></a>
-<span class="sourceLineNo">406</span>  private final boolean masterCheckCompression;<a name="line.406"></a>
-<span class="sourceLineNo">407</span><a name="line.407"></a>
-<span class="sourceLineNo">408</span>  //should we check encryption settings at master side, default true<a name="line.408"></a>
-<span class="sourceLineNo">409</span>  private final boolean masterCheckEncryption;<a name="line.409"></a>
-<span class="sourceLineNo">410</span><a name="line.410"></a>
-<span class="sourceLineNo">411</span>  Map&lt;String, Service&gt; coprocessorServiceHandlers = Maps.newHashMap();<a name="line.411"></a>
-<span class="sourceLineNo">412</span><a name="line.412"></a>
-<span class="sourceLineNo">413</span>  // monitor for snapshot of hbase tables<a name="line.413"></a>
-<span class="sourceLineNo">414</span>  SnapshotManager snapshotManager;<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  // monitor for distributed procedures<a name="line.415"></a>
-<span class="sourceLineNo">416</span>  private MasterProcedureManagerHost mpmHost;<a name="line.416"></a>
-<span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  // it is assigned after 'initialized' guard set to true, so should be volatile<a name="line.418"></a>
-<span class="sourceLineNo">419</span>  private volatile MasterQuotaManager quotaManager;<a name="line.419"></a>
-<span class="sourceLineNo">420</span>  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  private QuotaObserverChore quotaObserverChore;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  private SnapshotQuotaObserverChore snapshotQuotaChore;<a name="line.422"></a>
-<span class="sourceLineNo">423</span><a name="line.423"></a>
-<span class="sourceLineNo">424</span>  private ProcedureExecutor&lt;MasterProcedureEnv&gt; procedureExecutor;<a name="line.424"></a>
-<span class="sourceLineNo">425</span>  private WALProcedureStore procedureStore;<a name="line.425"></a>
-<span class="sourceLineNo">426</span><a name="line.426"></a>
-<span class="sourceLineNo">427</span>  // handle table states<a name="line.427"></a>
-<span class="sourceLineNo">428</span>  private TableStateManager tableStateManager;<a name="line.428"></a>
-<span class="sourceLineNo">429</span><a name="line.429"></a>
-<span class="sourceLineNo">430</span>  private long splitPlanCount;<a name="line.430"></a>
-<span class="sourceLineNo">431</span>  private long mergePlanCount;<a name="line.431"></a>
-<span class="sourceLineNo">432</span><a name="line.432"></a>
-<span class="sourceLineNo">433</span>  /* Handle favored nodes information */<a name="line.433"></a>
-<span class="sourceLineNo">434</span>  private FavoredNodesManager favoredNodesManager;<a name="line.434"></a>
-<span class="sourceLineNo">435</span><a name="line.435"></a>
-<span class="sourceLineNo">436</span>  /** jetty server for master to redirect requests to regionserver infoServer */<a name="line.436"></a>
-<span class="sourceLineNo">437</span>  private Server masterJettyServer;<a name="line.437"></a>
-<span class="sourceLineNo">438</span><a name="line.438"></a>
-<span class="sourceLineNo">439</span>  public static class RedirectServlet extends HttpServlet {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    private static final long serialVersionUID = 2894774810058302473L;<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    private final int regionServerInfoPort;<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    private final String regionServerHostname;<a name="line.442"></a>
-<span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>    /**<a name="line.444"></a>
-<span class="sourceLineNo">445</span>     * @param infoServer that we're trying to send all requests to<a name="line.445"></a>
-<span class="sourceLineNo">446</span>     * @param hostname may be null. if given, will be used for redirects instead of host from client.<a name="line.446"></a>
-<span class="sourceLineNo">447</span>     */<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    public RedirectServlet(InfoServer infoServer, String hostname) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>       regionServerInfoPort = infoServer.getPort();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>       regionServerHostname = hostname;<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    }<a name="line.451"></a>
-<span class="sourceLineNo">452</span><a name="line.452"></a>
-<span class="sourceLineNo">453</span>    @Override<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    public void doGet(HttpServletRequest request,<a name="line.454"></a>
-<span class="sourceLineNo">455</span>        HttpServletResponse response) throws ServletException, IOException {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      String redirectHost = regionServerHostname;<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      if(redirectHost == null) {<a name="line.457"></a>
-<span class="sourceLineNo">458</span>        redirectHost = request.getServerName();<a name="line.458"></a>
-<span class="sourceLineNo">459</span>        if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {<a name="line.459"></a>
-<span class="sourceLineNo">460</span>          LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +<a name="line.460"></a>
-<span class="sourceLineNo">461</span>              MASTER_HOSTNAME_KEY + "' is not set; client will get a HTTP 400 response. If " +<a name="line.461"></a>
-<span class="sourceLineNo">462</span>              "your HBase deployment relies on client accessible names that the region server process " +<a name="line.462"></a>
-<span class="sourceLineNo">463</span>              "can't resolve locally, then you should set the previously mentioned configuration variable " +<a name="line.463"></a>
-<span class="sourceLineNo">464</span>              "to an appropriate hostname.");<a name="line.464"></a>
-<span class="sourceLineNo">465</span>          // no sending client provided input back to the client, so the goal host is just in the logs.<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +<a name="line.466"></a>
-<span class="sourceLineNo">467</span>              "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          return;<a name="line.469"></a>
-<span class="sourceLineNo">470</span>        }<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      }<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      // host and port we're using, but it's buried way too deep to do that ATM.<a name="line.473"></a>
-<span class="sourceLineNo">474</span>      String redirectUrl = request.getScheme() + "://"<a name="line.474"></a>
-<span class="sourceLineNo">475</span>        + redirectHost + ":" + regionServerInfoPort<a name="line.475"></a>
-<span class="sourceLineNo">476</span>        + request.getRequestURI();<a name="line.476"></a>
-<span class="sourceLineNo">477</span>      response.sendRedirect(redirectUrl);<a name="line.477"></a>
-<span class="sourceLineNo">478</span>    }<a name="line.478"></a>
-<span class="sourceLineNo">479</span>  }<a name="line.479"></a>
-<span class="sourceLineNo">480</span><a name="line.480"></a>
-<span class="sourceLineNo">481</span>  /**<a name="line.481"></a>
-<span class="sourceLineNo">482</span>   * Initializes the HMaster. The steps are as follows:<a name="line.482"></a>
-<span class="sourceLineNo">483</span>   * &lt;p&gt;<a name="line.483"></a>
-<span class="sourceLineNo">484</span>   * &lt;ol&gt;<a name="line.484"></a>
-<span class="sourceLineNo">485</span>   * &lt;li&gt;Initialize the local HRegionServer<a name="line.485"></a>
-<span class="sourceLineNo">486</span>   * &lt;li&gt;Start the ActiveMasterManager.<a name="line.486"></a>
-<span class="sourceLineNo">487</span>   * &lt;/ol&gt;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>   * &lt;p&gt;<a name="line.488"></a>
-<span class="sourceLineNo">489</span>   * Remaining steps of initialization occur in<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * #finishActiveMasterInitialization(MonitoredTask) after<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * the master becomes the active one.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public HMaster(final Configuration conf)<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      throws IOException, KeeperException {<a name="line.494"></a>
-<span class="sourceLineNo">495</span>    super(conf);<a name="line.495"></a>
-<span class="sourceLineNo">496</span>    TraceUtil.initTracer(conf);<a name="line.496"></a>
-<span class="sourceLineNo">497</span>    try {<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      this.rsFatals = new MemoryBoundedLogMessageBuffer(<a name="line.498"></a>
-<span class="sourceLineNo">499</span>          conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      LOG.info("hbase.rootdir=" + getRootDir() +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>          ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));<a name="line.501"></a>
-<span class="sourceLineNo">502</span><a name="line.502"></a>
-<span class="sourceLineNo">503</span>      // Disable usage of meta replicas in the master<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      decorateMasterConfiguration(this.conf);<a name="line.506"></a>
-<span class="sourceLineNo">507</span><a name="line.507"></a>
-<span class="sourceLineNo">508</span>      // Hack! Maps DFSClient =&gt; Master for logs.  HDFS made this<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      // config param for task trackers, but we can piggyback off of it.<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      if (this.conf.get("mapreduce.task.attempt.id") == null) {<a name="line.510"></a>
-<span class="sourceLineNo">511</span>        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      }<a name="line.512"></a>
-<span class="sourceLineNo">513</span><a name="line.513"></a>
-<span class="sourceLineNo">514</span>      // should we check the compression codec type at master side, default true, HBASE-6370<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);<a name="line.515"></a>
-<span class="sourceLineNo">516</span><a name="line.516"></a>
-<span class="sourceLineNo">517</span>      // should we check encryption settings at master side, default true<a name="line.517"></a>
-<span class="sourceLineNo">518</span>      this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);<a name="line.518"></a>
-<span class="sourceLineNo">519</span><a name="line.519"></a>
-<span class="sourceLineNo">520</span>      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));<a name="line.520"></a>
-<span class="sourceLineNo">521</span><a name="line.521"></a>
-<span class="sourceLineNo">522</span>      // preload table descriptor at startup<a name="line.522"></a>
-<span class="sourceLineNo">523</span>      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>      this.maxBlancingTime = getMaxBalancingTime();<a name="line.525"></a>
-<span class="sourceLineNo">526</span>      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,<a name="line.526"></a>
-<span class="sourceLineNo">527</span>          HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);<a name="line.527"></a>
-<span class="sourceLineNo">528</span><a name="line.528"></a>
-<span class="sourceLineNo">529</span>      // Do we publish the status?<a name="line.529"></a>
-<span class="sourceLineNo">530</span><a name="line.530"></a>
-<span class="sourceLineNo">531</span>      boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,<a name="line.531"></a>
-<span class="sourceLineNo">532</span>          HConstants.STATUS_PUBLISHED_DEFAULT);<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      Class&lt;? extends ClusterStatusPublisher.Publisher&gt; publisherClass =<a name="line.533"></a>
-<span class="sourceLineNo">534</span>          conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,<a name="line.534"></a>
-<span class="sourceLineNo">535</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,<a name="line.535"></a>
-<span class="sourceLineNo">536</span>              ClusterStatusPublisher.Publisher.class);<a name="line.536"></a>
-<span class="sourceLineNo">537</span><a name="line.537"></a>
-<span class="sourceLineNo">538</span>      if (shouldPublish) {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        if (publisherClass == null) {<a name="line.539"></a>
-<span class="sourceLineNo">540</span>          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +<a name="line.540"></a>
-<span class="sourceLineNo">541</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +<a name="line.541"></a>
-<span class="sourceLineNo">542</span>              " is not set - not publishing status");<a name="line.542"></a>
-<span class="sourceLineNo">543</span>        } else {<a name="line.543"></a>
-<span class="sourceLineNo">544</span>          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>          getChoreService().scheduleChore(clusterStatusPublisherChore);<a name="line.545"></a>
-<span class="sourceLineNo">546</span>        }<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      }<a name="line.547"></a>
-<span class="sourceLineNo">548</span><a name="line.548"></a>
-<span class="sourceLineNo">549</span>      // Some unit tests don't need a cluster, so no zookeeper at all<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      } else {<a name="line.552"></a>
-<span class="sourceLineNo">553</span>        this.activeMasterManager = null;<a name="line.553"></a>
-<span class="sourceLineNo">554</span>      }<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    } catch (Throwable t) {<a name="line.555"></a>
-<span class="sourceLineNo">556</span>      // Make sure we log the exception. HMaster is often started via reflection and the<a name="line.556"></a>
-<span class="sourceLineNo">557</span>      // cause of failed startup is lost.<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      LOG.error("Failed construction of Master", t);<a name="line.558"></a>
-<span class="sourceLineNo">559</span>      throw t;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>  }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>  @Override<a name="line.563"></a>
-<span class="sourceLineNo">564</span>  protected String getUseThisHostnameInstead(Configuration conf) {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    return conf.get(MASTER_HOSTNAME_KEY);<a name="line.565"></a>
-<span class="sourceLineNo">566</span>  }<a name="line.566"></a>
-<span class="sourceLineNo">567</span><a name="line.567"></a>
-<span class="sourceLineNo">568</span>  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  // block in here until then.<a name="line.569"></a>
-<span class="sourceLineNo">570</span>  @Override<a name="line.570"></a>
-<span class="sourceLineNo">571</span>  public void run() {<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    try {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>        Threads.setDaemonThreadRunning(new Thread(() -&gt; {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>          try {<a name="line.575"></a>
-<span class="sourceLineNo">576</span>            int infoPort = putUpJettyServer();<a name="line.576"></a>
-<span class="sourceLineNo">577</span>            startActiveMasterManager(infoPort);<a name="line.577"></a>
-<span class="sourceLineNo">578</span>          } catch (Throwable t) {<a name="line.578"></a>
-<span class="sourceLineNo">579</span>            // Make sure we log the exception.<a name="line.579"></a>
-<span class="sourceLineNo">580</span>            String error = "Failed to become Active Master";<a name="line.580"></a>
-<span class="sourceLineNo">581</span>            LOG.error(error, t);<a name="line.581"></a>
-<span class="sourceLineNo">582</span>            // Abort should have been called already.<a name="line.582"></a>
-<span class="sourceLineNo">583</span>            if (!isAborted()) {<a name="line.583"></a>
-<span class="sourceLineNo">584</span>              abort(error, t);<a name="line.584"></a>
-<span class="sourceLineNo">585</span>            }<a name="line.585"></a>
-<span class="sourceLineNo">586</span>          }<a name="line.586"></a>
-<span class="sourceLineNo">587</span>        }));<a name="line.587"></a>
-<span class="sourceLineNo">588</span>      }<a name="line.588"></a>
-<span class="sourceLineNo">589</span>      // Fall in here even if we have been aborted. Need to run the shutdown services and<a name="line.589"></a>
-<span class="sourceLineNo">590</span>      // the super run call will do this for us.<a name="line.590"></a>
-<span class="sourceLineNo">591</span>      super.run();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    } finally {<a name="line.592"></a>
-<span class="sourceLineNo">593</span>      if (this.clusterSchemaService != null) {<a name="line.593"></a>
-<span class="sourceLineNo">594</span>        // If on way out, then we are no longer active master.<a name="line.594"></a>
-<span class="sourceLineNo">595</span>        this.clusterSchemaService.stopAsync();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>        try {<a name="line.596"></a>
-<span class="sourceLineNo">597</span>          this.clusterSchemaService.awaitTerminated(<a name="line.597"></a>
-<span class="sourceLineNo">598</span>              getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,<a name="line.598"></a>
-<span class="sourceLineNo">599</span>              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);<a name="line.599"></a>
-<span class="sourceLineNo">600</span>        } catch (TimeoutException te) {<a name="line.600"></a>
-<span class="sourceLineNo">601</span>          LOG.warn("Failed shutdown of clusterSchemaService", te);<a name="line.601"></a>
-<span class="sourceLineNo">602</span>        }<a name="line.602"></a>
-<span class="sourceLineNo">603</span>      }<a name="line.603"></a>
-<span class="sourceLineNo">604</span>      this.activeMaster = false;<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    }<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  }<a name="line.606"></a>
-<span class="sourceLineNo">607</span><a name="line.607"></a>
-<span class="sourceLineNo">608</span>  // return the actual infoPort, -1 means disable info server.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>  private int putUpJettyServer() throws IOException {<a name="line.609"></a>
-<span class="sourceLineNo">610</span>    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      return -1;<a name="line.611"></a>
-<span class="sourceLineNo">612</span>    }<a name="line.612"></a>
-<span class="sourceLineNo">613</span>    final int infoPort = conf.getInt("hbase.master.info.port.orig",<a name="line.613"></a>
-<span class="sourceLineNo">614</span>      HConstants.DEFAULT_MASTER_INFOPORT);<a name="line.614"></a>
-<span class="sourceLineNo">615</span>    // -1 is for disabling info server, so no redirecting<a name="line.615"></a>
-<span class="sourceLineNo">616</span>    if (infoPort &lt; 0 || infoServer == null) {<a name="line.616"></a>
-<span class="sourceLineNo">617</span>      return -1;<a name="line.617"></a>
-<span class="sourceLineNo">618</span>    }<a name="line.618"></a>
-<span class="sourceLineNo">619</span>    if(infoPort == infoServer.getPort()) {<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      return infoPort;<a name="line.620"></a>
-<span class="sourceLineNo">621</span>    }<a name="line.621"></a>
-<span class="sourceLineNo">622</span>    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");<a name="line.622"></a>
-<span class="sourceLineNo">623</span>    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {<a name="line.623"></a>
-<span class="sourceLineNo">624</span>      String msg =<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          "Failed to start redirecting jetty server. Address " + addr<a name="line.625"></a>
-<span class="sourceLineNo">626</span>              + " does not belong to this host. Correct configuration parameter: "<a name="line.626"></a>
-<span class="sourceLineNo">627</span>              + "hbase.master.info.bindAddress";<a name="line.627"></a>
-<span class="sourceLineNo">628</span>      LOG.error(msg);<a name="line.628"></a>
-<span class="sourceLineNo">629</span>      throw new IOException(msg);<a name="line.629"></a>
-<span class="sourceLineNo">630</span>    }<a name="line.630"></a>
-<span class="sourceLineNo">631</span><a name="line.631"></a>
-<span class="sourceLineNo">632</span>    // TODO I'm pretty sure we could just add another binding to the InfoServer run by<a name="line.632"></a>
-<span class="sourceLineNo">633</span>    // the RegionServer and have it run the RedirectServlet instead of standing up<a name="line.633"></a>
-<span class="sourceLineNo">634</span>    // a second entire stack here.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>    masterJettyServer = new Server();<a name="line.635"></a>
-<span class="sourceLineNo">636</span>    final ServerConnector connector = new ServerConnector(masterJettyServer);<a name="line.636"></a>
-<span class="sourceLineNo">637</span>    connector.setHost(addr);<a name="line.637"></a>
-<span class="sourceLineNo">638</span>    connector.setPort(infoPort);<a name="line.638"></a>
-<span class="sourceLineNo">639</span>    masterJettyServer.addConnector(connector);<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    masterJettyServer.setStopAtShutdown(true);<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    final String redirectHostname =<a name="line.642"></a>
-<span class="sourceLineNo">643</span>        StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;<a name="line.643"></a>
-<span class="sourceLineNo">644</span><a name="line.644"></a>
-<span class="sourceLineNo">645</span>    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    context.addServlet(new ServletHolder(redirect), "/*");<a name="line.647"></a>
-<span class="sourceLineNo">648</span>    context.setServer(masterJettyServer);<a name="line.648"></a>
-<span class="sourceLineNo">649</span><a name="line.649"></a>
-<span class="sourceLineNo">650</span>    try {<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      masterJettyServer.start();<a name="line.651"></a>
-<span class="sourceLineNo">652</span>    } catch (Exception e) {<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      throw new IOException("Failed to start redirecting jetty server", e);<a name="line.653"></a>
-<span class="sourceLineNo">654</span>    }<a name="line.654"></a>
-<span class="sourceLineNo">655</span>    return connector.getLocalPort();<a name="line.655"></a>
-<span class="sourceLineNo">656</span>  }<a name="line.656"></a>
-<span class="sourceLineNo">657</span><a name="line.657"></a>
-<span class="sourceLineNo">658</span>  @Override<a name="line.658"></a>
-<span class="sourceLineNo">659</span>  protected Function&lt;TableDescriptorBuilder, TableDescriptorBuilder&gt; getMetaTableObserver() {<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    return builder -&gt; builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));<a name="line.660"></a>
-<span class="sourceLineNo">661</span>  }<a name="line.661"></a>
-<span class="sourceLineNo">662</span>  /**<a name="line.662"></a>
-<span class="sourceLineNo">663</span>   * For compatibility, if failed with regionserver credentials, try the master one<a name="line.663"></a>
-<span class="sourceLineNo">664</span>   */<a name="line.664"></a>
-<span class="sourceLineNo">665</span>  @Override<a name="line.665"></a>
-<span class="sourceLineNo">666</span>  protected void login(UserProvider user, String host) throws IOException {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>    try {<a name="line.667"></a>
-<span class="sourceLineNo">668</span>      super.login(user, host);<a name="line.668"></a>
-<span class="sourceLineNo">669</span>    } catch (IOException ie) {<a name="line.669"></a>
-<span class="sourceLineNo">670</span>      user.login("hbase.master.keytab.file",<a name="line.670"></a>
-<span class="sourceLineNo">671</span>        "hbase.master.kerberos.principal", host);<a name="line.671"></a>
-<span class="sourceLineNo">672</span>    }<a name="line.672"></a>
-<span class="sourceLineNo">673</span>  }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>  /**<a name="line.675"></a>
-<span class="sourceLineNo">676</span>   * If configured to put regions on active master,<a name="line.676"></a>
-<span class="sourceLineNo">677</span>   * wait till a backup master becomes active.<a name="line.677"></a>
-<span class="sourceLineNo">678</span>   * Otherwise, loop till the server is stopped or aborted.<a name="line.678"></a>
-<span class="sourceLineNo">679</span>   */<a name="line.679"></a>
-<span class="sourceLineNo">680</span>  @Override<a name="line.680"></a>
-<span class="sourceLineNo">681</span>  protected void waitForMasterActive(){<a name="line.681"></a>
-<span class="sourceLineNo">682</span>    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);<a name="line.682"></a>
-<span class="sourceLineNo">683</span>    while (!(tablesOnMaster &amp;&amp; activeMaster) &amp;&amp; !isStopped() &amp;&amp; !isAborted()) {<a name="line.683"></a>
-<span class="sourceLineNo">684</span>      sleeper.sleep();<a name="line.684"></a>
-<span class="sourceLineNo">685</span>    }<a name="line.685"></a>
-<span class="sourceLineNo">686</span>  }<a name="line.686"></a>
-<span class="sourceLineNo">687</span><a name="line.687"></a>
-<span class="sourceLineNo">688</span>  @VisibleForTesting<a name="line.688"></a>
-<span class="sourceLineNo">689</span>  public MasterRpcServices getMasterRpcServices() {<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    return (MasterRpcServices)rpcServices;<a name="line.690"></a>
-<span class="sourceLineNo">691</span>  }<a name="line.691"></a>
-<span class="sourceLineNo">692</span><a name="line.692"></a>
-<span class="sourceLineNo">693</span>  public boolean balanceSwitch(final boolean b) throws IOException {<a name="line.693"></a>
-<span class="sourceLineNo">694</span>    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);<a name="line.694"></a>
-<span class="sourceLineNo">695</span>  }<a name="line.695"></a>
-<span class="sourceLineNo">696</span><a name="line.696"></a>
-<span class="sourceLineNo">697</span>  @Override<a name="line.697"></a>
-<span class="sourceLineNo">698</span>  protected String getProcessName() {<a name="line.698"></a>
-<span class="sourceLineNo">699</span>    return MASTER;<a name="line.699"></a>
-<span class="sourceLineNo">700</span>  }<a name="line.700"></a>
-<span class="sourceLineNo">701</span><a name="line.701"></a>
-<span class="sourceLineNo">702</span>  @Override<a name="line.702"></a>
-<span class="sourceLineNo">703</span>  protected boolean canCreateBaseZNode() {<a name="line.703"></a>
-<span class="sourceLineNo">704</span>    return true;<a name="line.704"></a>
-<span class="sourceLineNo">705</span>  }<a name="line.705"></a>
-<span class="sourceLineNo">706</span><a name="line.706"></a>
-<span class="sourceLineNo">707</span>  @Override<a name="line.707"></a>
-<span class="sourceLineNo">708</span>  protected boolean canUpdateTableDescriptor() {<a name="line.708"></a>
-<span class="sourceLineNo">709</span>    return true;<a name="line.709"></a>
-<span class="sourceLineNo">710</span>  }<a name="line.710"></a>
-<span class="sourceLineNo">711</span><a name="line.711"></a>
-<span class="sourceLineNo">712</span>  @Override<a name="line.712"></a>
-<span class="sourceLineNo">713</span>  protected RSRpcServices createRpcServices() throws IOException {<a name="line.713"></a>
-<span class="sourceLineNo">714</span>    return new MasterRpcServices(this);<a name="line.714"></a>
-<span class="sourceLineNo">715</span>  }<a name="line.715"></a>
-<span class="sourceLineNo">716</span><a name="line.716"></a>
-<span class="sourceLineNo">717</span>  @Override<a name="line.717"></a>
-<span class="sourceLineNo">718</span>  protected void configureInfoServer() {<a name="line.718"></a>
-<span class="sourceLineNo">719</span>    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);<a name="line.719"></a>
-<span class="sourceLineNo">720</span>    infoServer.setAttribute(MASTER, this);<a name="line.720"></a>
-<span class="sourceLineNo">721</span>    if (LoadBalancer.isTablesOnMaster(conf)) {<a name="line.721"></a>
-<span class="sourceLineNo">722</span>      super.configureInfoServer();<a name="line.722"></a>
-<span class="sourceLineNo">723</span>    }<a name="line.723"></a>
-<span class="sourceLineNo">724</span>  }<a name="line.724"></a>
-<span class="sourceLineNo">725</span><a name="line.725"></a>
-<span class="sourceLineNo">726</span>  @Override<a name="line.726"></a>
-<span class="sourceLineNo">727</span>  protected Class&lt;? extends HttpServlet&gt; getDumpServlet() {<a name="line.727"></a>
-<span class="sourceLineNo">728</span>    return MasterDumpServlet.class;<a name="line.728"></a>
-<span class="sourceLineNo">729</span>  }<a name="line.729"></a>
-<span class="sourceLineNo">730</span><a name="line.730"></a>
-<span class="sourceLineNo">731</span>  @Override<a name="line.731"></a>
-<span class="sourceLineNo">732</span>  public MetricsMaster getMasterMetrics() {<a name="line.732"></a>
-<span class="sourceLineNo">733</span>    return metricsMaster;<a name="line.733"></a>
-<span class="sourceLineNo">734</span>  }<a name="line.734"></a>
-<span class="sourceLineNo">735</span><a name="line.735"></a>
-<span class="sourceLineNo">736</span>  /**<a name="line.736"></a>
-<span class="sourceLineNo">737</span>   * &lt;p&gt;<a name="line.737"></a>
-<span class="sourceLineNo">738</span>   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it<a name="line.738"></a>
-<span class="sourceLineNo">739</span>   * should have already been initialized along with {@link ServerManager}.<a name="line.739"></a>
-<span class="sourceLineNo">740</span>   * &lt;/p&gt;<a name="line.740"></a>
-<span class="sourceLineNo">741</span>   * &lt;p&gt;<a name="line.741"></a>
-<span class="sourceLineNo">742</span>   * Will be overridden in tests.<a name="line.742"></a>
-<span class="sourceLineNo">743</span>   * &lt;/p&gt;<a name="line.743"></a>
-<span class="sourceLineNo">744</span>   */<a name="line.744"></a>
-<span class="sourceLineNo">745</span>  @VisibleForTesting<a name="line.745"></a>
-<span class="sourceLineNo">746</span>  protected void initializeZKBasedSystemTrackers()<a name="line.746"></a>
-<span class="sourceLineNo">747</span>      throws IOException, InterruptedException, KeeperException, ReplicationException {<a name="line.747"></a>
-<span class="sourceLineNo">748</span>    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);<a name="line.748"></a>
-<span class="sourceLineNo">749</span>    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);<a name="line.749"></a>
-<span class="sourceLineNo">750</span>    this.normalizer.setMasterServices(this);<a name="line.750"></a>
-<span class="sourceLineNo">751</span>    this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);<a name="line.751"></a>
-<span class="sourceLineNo">752</span>    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);<a name="line.752"></a>
-<span class="sourceLineNo">753</span>    this.loadBalancerTracker.start();<a name="line.753"></a>
-<span class="sourceLineNo">754</span><a name="line.754"></a>
-<span class="sourceLineNo">755</span>    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    this.regionNormalizerTracker.start();<a name="line.756"></a>
-<span class="sourceLineNo">757</span><a name="line.757"></a>
-<span class="sourceLineNo">758</span>    this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);<a name="line.758"></a>
-<span class="sourceLineNo">759</span>    this.splitOrMergeTracker.start();<a name="line.759"></a>
-<span class="sourceLineNo">760</span><a name="line.760"></a>
-<span class="sourceLineNo">761</span>    this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);<a name="line.761"></a>
-<span class="sourceLineNo">762</span>    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);<a name="line.762"></a>
-<span class="sourceLineNo">763</span><a name="line.763"></a>
-<span class="sourceLineNo">764</span>    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);<a name="line.764"></a>
-<span class="sourceLineNo">765</span>    this.drainingServerTracker.start();<a name="line.765"></a>
-<span class="sourceLineNo">766</span><a name="line.766"></a>
-<span class="sourceLineNo">767</span>    this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);<a name="line.767"></a>
-<span class="sourceLineNo">768</span>    this.maintenanceModeTracker.start();<a name="line.768"></a>
-<span class="sourceLineNo">769</span><a name="line.769"></a>
-<span class="sourceLineNo">770</span>    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);<a name="line.770"></a>
-<span class="sourceLineNo">771</span>    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,<a name="line.771"></a>
-<span class="sourceLineNo">772</span>      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);<a name="line.772"></a>
-<span class="sourceLineNo">773</span>    if (clientQuorumServers != null &amp;&amp; !clientZkObserverMode) {<a name="line.773"></a>
-<span class="sourceLineNo">774</span>      // we need to take care of the ZK information synchronization<a name="line.774"></a>
-<span class="sourceLineNo">775</span>      // if given client ZK are not observer nodes<a name="line.775"></a>
-<span class="sourceLineNo">776</span>      ZKWatcher clientZkWatcher = new ZKWatcher(conf,<a name="line.776"></a>
-<span class="sourceLineNo">777</span>          getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,<a name="line.777"></a>
-<span class="sourceLineNo">778</span>          false, true);<a name="line.778"></a>
-<span class="sourceLineNo">779</span>      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);<a name="line.779"></a>
-<span class="sourceLineNo">780</span>      this.metaLocationSyncer.start();<a name="line.780"></a>
-<span class="sourceLineNo">781</span>      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);<a name="line.781"></a>
-<span class="sourceLineNo">782</span>      this.masterAddressSyncer.start();<a name="line.782"></a>
-<span class="sourceLineNo">783</span>      // set cluster id is a one-go effort<a name="line.783"></a>
-<span class="sourceLineNo">784</span>      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());<a name="line.784"></a>
-<span class="sourceLineNo">785</span>    }<a name="line.785"></a>
-<span class="sourceLineNo">786</span><a name="line.786"></a>
-<span class="sourceLineNo">787</span>    // Set the cluster as up.  If new RSs, they'll be waiting on this before<a name="line.787"></a>
-<span class="sourceLineNo">788</span>    // going ahead with their startup.<a name="line.788"></a>
-<span class="sourceLineNo">789</span>    boolean wasUp = this.clusterStatusTracker.isClusterUp();<a name="line.789"></a>
-<span class="sourceLineNo">790</span>    if (!wasUp) this.clusterStatusTracker.setClusterUp();<a name="line.790"></a>
-<span class="sourceLineNo">791</span><a name="line.791"></a>
-<span class="sourceLineNo">792</span>    LOG.info("Active/primary master=" + this.serverName +<a name="line.792"></a>
-<span class="sourceLineNo">793</span>        ", sessionid=0x" +<a name="line.793"></a>
-<span class="sourceLineNo">794</span>        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +<a name="line.794"></a>
-<span class="sourceLineNo">795</span>        ", setting cluster-up flag (Was=" + wasUp + ")");<a name="line.795"></a>
-<span class="sourceLineNo">796</span><a name="line.796"></a>
-<span class="sourceLineNo">797</span>    // create/initialize the snapshot manager and other procedure managers<a name="line.797"></a>
-<span class="sourceLineNo">798</span>    this.snapshotManager = new SnapshotManager();<a name="line.798"></a>
-<span class="sourceLineNo">799</span>    this.mpmHost = new MasterProcedureManagerHost();<a name="line.799"></a>
-<span class="sourceLineNo">800</span>    this.mpmHost.register(this.snapshotManager);<a name="line.800"></a>
-<span class="sourceLineNo">801</span>    this.mpmHost.register(new MasterFlushTableProcedureManager());<a name="line.801"></a>
-<span class="sourceLineNo">802</span>    this.mpmHost.loadProcedures(conf);<a name="line.802"></a>
-<span class="sourceLineNo">803</span>    this.mpmHost.initialize(this, this.metricsMaster);<a name="line.803"></a>
-<span class="sourceLineNo">804</span>  }<a name="line.804"></a>
-<span class="sourceLineNo">805</span><a name="line.805"></a>
-<span class="sourceLineNo">806</span>  private static final ImmutableSet&lt;Class&lt;?&gt;&gt; UNSUPPORTED_PROCEDURES =<a name="line.806"></a>
-<span class="sourceLineNo">807</span>    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,<a name="line.807"></a>
-<span class="sourceLineNo">808</span>      MoveRegionProcedure.class);<a name="line.808"></a>
-<span class="sourceLineNo">809</span><a name="line.809"></a>
-<span class="sourceLineNo">810</span>  /**<a name="line.810"></a>
-<span class="sourceLineNo">811</span>   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is<a name="line.811"></a>
-<span class="sourceLineNo">812</span>   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to<a name="line.812"></a>
-<span class="sourceLineNo">813</span>   * make sure that there are none these procedures when upgrading. If there are, the master will<a name="line.813"></a>
-<span class="sourceLineNo">814</span>   * quit, you need to go back to the old version to finish these procedures first before upgrading.<a name="line.814"></a>
-<span class="sourceLineNo">815</span>   */<a name="line.815"></a>
-<span class="sourceLineNo">816</span>  private void checkUnsupportedProcedure(<a name="line.816"></a>
-<span class="sourceLineNo">817</span>      Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType)<a name="line.817"></a>
-<span class="sourceLineNo">818</span>      throws HBaseIOException {<a name="line.818"></a>
-<span class="sourceLineNo">819</span>    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to<a name="line.819"></a>
-<span class="sourceLineNo">820</span>    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as<a name="line.820"></a>
-<span class="sourceLineNo">821</span>    // there will be conflict in the code for AM. We should finish all these procedures before<a name="line.821"></a>
-<span class="sourceLineNo">822</span>    // upgrading.<a name="line.822"></a>
-<span class="sourceLineNo">823</span>    for (Class&lt;?&gt; clazz : UNSUPPORTED_PROCEDURES) {<a name="line.823"></a>
-<span class="sourceLineNo">824</span>      List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt; procs = procsByType.get(clazz);<a name="line.824"></a>
-<span class="sourceLineNo">825</span>      if (procs != null) {<a name="line.825"></a>
-<span class="sourceLineNo">826</span>        LOG.error(<a name="line.826"></a>
-<span class="sourceLineNo">827</span>          "Unsupported procedure type {} found, please rollback your master to the old" +<a name="line.827"></a>
-<span class="sourceLineNo">828</span>            " version to finish them, and then try to upgrade again. The full procedure list: {}",<a name="line.828"></a>
-<span class="sourceLineNo">829</span>          clazz, procs);<a name="line.829"></a>
-<span class="sourceLineNo">830</span>        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");<a name="line.830"></a>
-<span class="sourceLineNo">831</span>      }<a name="line.831"></a>
-<span class="sourceLineNo">832</span>    }<a name="line.832"></a>
-<span class="sourceLineNo">833</span>    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to<a name="line.833"></a>
-<span class="sourceLineNo">834</span>    // make sure that no one will try to schedule it but SCP does have a state which will schedule<a name="line.834"></a>
-<span class="sourceLineNo">835</span>    // it.<a name="line.835"></a>
-<span class="sourceLineNo">836</span>    if (procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.836"></a>
-<span class="sourceLineNo">837</span>      .map(p -&gt; (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)) {<a name="line.837"></a>
-<span class="sourceLineNo">838</span>      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure," +<a name="line.838"></a>
-<span class="sourceLineNo">839</span>        " which is not supported any more. Please rollback your master to the old version to" +<a name="line.839"></a>
-<span class="sourceLineNo">840</span>        " finish them, and then try to upgrade again.");<a name="line.840"></a>
-<span class="sourceLineNo">841</span>      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");<a name="line.841"></a>
-<span class="sourceLineNo">842</span>    }<a name="line.842"></a>
-<span class="sourceLineNo">843</span>  }<a name="line.843"></a>
-<span class="sourceLineNo">844</span><a name="line.844"></a>
-<span class="sourceLineNo">845</span>  /**<a name="line.845"></a>
-<span class="sourceLineNo">846</span>   * Finish initialization of HMaster after becoming the primary master.<a name="line.846"></a>
-<span class="sourceLineNo">847</span>   * &lt;p/&gt;<a name="line.847"></a>
-<span class="sourceLineNo">848</span>   * The startup order is a bit complicated but very important, do not change it unless you know<a name="line.848"></a>
-<span class="sourceLineNo">849</span>   * what you are doing.<a name="line.849"></a>
-<span class="sourceLineNo">850</span>   * &lt;ol&gt;<a name="line.850"></a>
-<span class="sourceLineNo">851</span>   * &lt;li&gt;Initialize file system based components - file system manager, wal manager, table<a name="line.851"></a>
-<span class="sourceLineNo">852</span>   * descriptors, etc&lt;/li&gt;<a name="line.852"></a>
-<span class="sourceLineNo">853</span>   * &lt;li&gt;Publish cluster id&lt;/li&gt;<a name="line.853"></a>
-<span class="sourceLineNo">854</span>   * &lt;li&gt;Here comes the most complicated part - initialize server manager, assignment manager and<a name="line.854"></a>
-<span class="sourceLineNo">855</span>   * region server tracker<a name="line.855"></a>
-<span class="sourceLineNo">856</span>   * &lt;ol type='i'&gt;<a name="line.856"></a>
-<span class="sourceLineNo">857</span>   * &lt;li&gt;Create server manager&lt;/li&gt;<a name="line.857"></a>
-<span class="sourceLineNo">858</span>   * &lt;li&gt;Create procedure executor, load the procedures, but do not start workers. We will start it<a name="line.858"></a>
-<span class="sourceLineNo">859</span>   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same<a name="line.859"></a>
-<span class="sourceLineNo">860</span>   * server&lt;/li&gt;<a name="line.860"></a>
-<span class="sourceLineNo">861</span>   * &lt;li&gt;Create assignment manager and start it, load the meta region state, but do not load data<a name="line.861"></a>
-<span class="sourceLineNo">862</span>   * from meta region&lt;/li&gt;<a name="line.862"></a>
-<span class="sourceLineNo">863</span>   * &lt;li&gt;Start region server tracker, construct the online servers set and find out dead servers and<a name="line.863"></a>
-<span class="sourceLineNo">864</span>   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also<a name="line.864"></a>
-<span class="sourceLineNo">865</span>   * scan the wal directory to find out possible live region servers, and the differences between<a name="line.865"></a>
-<span class="sourceLineNo">866</span>   * these two sets are the dead servers&lt;/li&gt;<a name="line.866"></a>
-<span class="sourceLineNo">867</span>   * &lt;/ol&gt;<a name="line.867"></a>
-<span class="sourceLineNo">868</span>   * &lt;/li&gt;<a name="line.868"></a>
-<span class="sourceLineNo">869</span>   * &lt;li&gt;If this is a new deploy, schedule a InitMetaProcedure to initialize meta&lt;/li&gt;<a name="line.869"></a>
-<span class="sourceLineNo">870</span>   * &lt;li&gt;Start necessary service threads - balancer, catalog janior, executor services, and also the<a name="line.870"></a>
-<span class="sourceLineNo">871</span>   * procedure executor, etc. Notice that the balancer must be created first as assignment manager<a name="line.871"></a>
-<span class="sourceLineNo">872</span>   * may use it when assigning regions.&lt;/li&gt;<a name="line.872"></a>
-<span class="sourceLineNo">873</span>   * &lt;li&gt;Wait for meta to be initialized if necesssary, start table state manager.&lt;/li&gt;<a name="line.873"></a>
-<span class="sourceLineNo">874</span>   * &lt;li&gt;Wait for enough region servers to check-in&lt;/li&gt;<a name="line.874"></a>
-<span class="sourceLineNo">875</span>   * &lt;li&gt;Let assignment manager load data from meta and construct region states&lt;/li&gt;<a name="line.875"></a>
-<span class="sourceLineNo">876</span>   * &lt;li&gt;Start all other things such as chore services, etc&lt;/li&gt;<a name="line.876"></a>
-<span class="sourceLineNo">877</span>   * &lt;/ol&gt;<a name="line.877"></a>
-<span class="sourceLineNo">878</span>   * &lt;p/&gt;<a name="line.878"></a>
-<span class="sourceLineNo">879</span>   * Notice that now we will not schedule a special procedure to make meta online(unless the first<a name="line.879"></a>
-<span class="sourceLineNo">880</span>   * time where meta has not been created yet), we will rely on SCP to bring meta online.<a name="line.880"></a>
-<span class="sourceLineNo">881</span>   */<a name="line.881"></a>
-<span class="sourceLineNo">882</span>  private void finishActiveMasterInitialization(MonitoredTask status) throws IOException,<a name="line.882"></a>
-<span class="sourceLineNo">883</span>          InterruptedException, KeeperException, ReplicationException {<a name="line.883"></a>
-<span class="sourceLineNo">884</span>    Thread zombieDetector = new Thread(new InitializationMonitor(this),<a name="line.884"></a>
-<span class="sourceLineNo">885</span>        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());<a name="line.885"></a>
-<span class="sourceLineNo">886</span>    zombieDetector.setDaemon(true);<a name="line.886"></a>
-<span class="sourceLineNo">887</span>    zombieDetector.start();<a name="line.887"></a>
-<span class="sourceLineNo">888</span><a name="line.888"></a>
-<span class="sourceLineNo">889</span>    /*<a name="line.889"></a>
-<span class="sourceLineNo">890</span>     * We are active master now... go initialize components we need to run.<a name="line.890"></a>
-<span class="sourceLineNo">891</span>     */<a name="line.891"></a>
-<span class="sourceLineNo">892</span>    status.setStatus("Initializing Master file system");<a name="line.892"></a>
-<span class="sourceLineNo">893</span><a name="line.893"></a>
-<span class="sourceLineNo">894</span>    this.masterActiveTime = System.currentTimeMillis();<a name="line.894"></a>
-<span class="sourceLineNo">895</span>    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.<a name="line.895"></a>
-<span class="sourceLineNo">896</span>    // Initialize the chunkCreator<a name="line.896"></a>
-<span class="sourceLineNo">897</span>    initializeMemStoreChunkCreator();<a name="line.897"></a>
-<span class="sourceLineNo">898</span>    this.fileSystemManager = new MasterFileSystem(conf);<a name="line.898"></a>
-<span class="sourceLineNo">899</span>    this.walManager = new MasterWalManager(this);<a name="line.899"></a>
-<span class="sourceLineNo">900</span><a name="line.900"></a>
-<span class="sourceLineNo">901</span>    // enable table descriptors cache<a name="line.901"></a>
-<span class="sourceLineNo">902</span>    this.tableDescriptors.setCacheOn();<a name="line.902"></a>
-<span class="sourceLineNo">903</span><a name="line.903"></a>
-<span class="sourceLineNo">904</span>    // warm-up HTDs cache on master initialization<a name="line.904"></a>
-<span class="sourceLineNo">905</span>    if (preLoadTableDescriptors) {<a name="line.905"></a>
-<span class="sourceLineNo">906</span>      status.setStatus("Pre-loading table descriptors");<a name="line.906"></a>
-<span class="sourceLineNo">907</span>      this.tableDescriptors.getAll();<a name="line.907"></a>
-<span class="sourceLineNo">908</span>    }<a name="line.908"></a>
-<span class="sourceLineNo">909</span><a name="line.909"></a>
-<span class="sourceLineNo">910</span>    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but<a name="line.910"></a>
-<span class="sourceLineNo">911</span>    // only after it has checked in with the Master. At least a few tests ask Master for clusterId<a name="line.911"></a>
-<span class="sourceLineNo">912</span>    // before it has called its run method and before RegionServer has done the reportForDuty.<a name="line.912"></a>
-<span class="sourceLineNo">913</span>    ClusterId clusterId = fileSystemManager.getClusterId();<a name="line.913"></a>
-<span class="sourceLineNo">914</span>    status.setStatus("Publishing Cluster ID " + clusterId + " in ZooKeeper");<a name="line.914"></a>
-<span class="sourceLineNo">915</span>    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());<a name="line.915"></a>
-<span class="sourceLineNo">916</span>    this.clusterId = clusterId.toString();<a name="line.916"></a>
-<span class="sourceLineNo">917</span><a name="line.917"></a>
-<span class="sourceLineNo">918</span>    status.setStatus("Initialze ServerManager and schedule SCP for crash servers");<a name="line.918"></a>
-<span class="sourceLineNo">919</span>    this.serverManager = createServerManager(this);<a name="line.919"></a>
-<span class="sourceLineNo">920</span>    createProcedureExecutor();<a name="line.920"></a>
-<span class="sourceLineNo">921</span>    @SuppressWarnings("rawtypes")<a name="line.921"></a>
-<span class="sourceLineNo">922</span>    Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType =<a name="line.922"></a>
-<span class="sourceLineNo">923</span>      procedureExecutor.getActiveProceduresNoCopy().stream()<a name="line.923"></a>
-<span class="sourceLineNo">924</span>        .collect(Collectors.groupingBy(p -&gt; p.getClass()));<a name="line.924"></a>
-<span class="sourceLineNo">925</span><a name="line.925"></a>
-<span class="sourceLineNo">926</span>    checkUnsupportedProcedure(procsByType);<a name="line.926"></a>
-<span class="sourceLineNo">927</span><a name="line.927"></a>
-<span class="sourceLineNo">928</span>    // Create Assignment Manager<a name="line.928"></a>
-<span class="sourceLineNo">929</span>    this.assignmentManager = new AssignmentManager(this);<a name="line.929"></a>
-<span class="sourceLineNo">930</span>    this.assignmentManager.start();<a name="line.930"></a>
-<span class="sourceLineNo">931</span>    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as<a name="line.931"></a>
-<span class="sourceLineNo">932</span>    // completed, it could still be in the procedure list. This is a bit strange but is another<a name="line.932"></a>
-<span class="sourceLineNo">933</span>    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.<a name="line.933"></a>
-<span class="sourceLineNo">934</span>    List&lt;TransitRegionStateProcedure&gt; ritList =<a name="line.934"></a>
-<span class="sourceLineNo">935</span>      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()<a name="line.935"></a>
-<span class="sourceLineNo">936</span>        .filter(p -&gt; !p.isFinished()).map(p -&gt; (TransitRegionStateProcedure) p)<a name="line.936"></a>
-<span class="sourceLineNo">937</span>        .collect(Collectors.toList());<a name="line.937"></a>
-<span class="sourceLineNo">938</span>    this.assignmentManager.setupRIT(ritList);<a name="line.938"></a>
-<span class="sourceLineNo">939</span><a name="line.939"></a>
-<span class="sourceLineNo">940</span>    this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);<a name="line.940"></a>
-<span class="sourceLineNo">941</span>    this.regionServerTracker.start(<a name="line.941"></a>
-<span class="sourceLineNo">942</span>      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.942"></a>
-<span class="sourceLineNo">943</span>        .map(p -&gt; (ServerCrashProcedure) p).map(p -&gt; p.getServerName()).collect(Collectors.toSet()),<a name="line.943"></a>
-<span class="sourceLineNo">944</span>      walManager.getLiveServersFromWALDir());<a name="line.944"></a>
-<span class="sourceLineNo">945</span>    // This manager will be started AFTER hbase:meta is confirmed on line.<a name="line.945"></a>
-<span class="sourceLineNo">946</span>    // hbase.mirror.table.state.to.zookeeper is so hbase1 clients can connect. They read table<a name="line.946"></a>
-<span class="sourceLineNo">947</span>    // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients.<a name="line.947"></a>
-<span class="sourceLineNo">948</span>    this.tableStateManager =<a name="line.948"></a>
-<span class="sourceLineNo">949</span>      this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true)<a name="line.949"></a>
-<span class="sourceLineNo">950</span>        ?<a name="line.950"></a>
-<span class="sourceLineNo">951</span>        new MirroringTableStateManager(this):<a name="line.951"></a>
-<span class="sourceLineNo">952</span>        new TableStateManager(this);<a name="line.952"></a>
-<span class="sourceLineNo">953</span><a name="line.953"></a>
-<span class="sourceLineNo">954</span>    status.setStatus("Initializing ZK system trackers");<a name="line.954"></a>
-<span class="sourceLineNo">955</span>    initializeZKBasedSystemTrackers();<a name="line.955"></a>
-<span class="sourceLineNo">956</span>    status.setStatus("Loading last flushed sequence id of regions");<a name="line.956"></a>
-<span class="sourceLineNo">957</span>    try {<a name="line.957"></a>
-<span class="sourceLineNo">958</span>      this.serverManager.loadLastFlushedSequenceIds();<a name="line.958"></a>
-<span class="sourceLineNo">959</span>    } catch (IOException e) {<a name="line.959"></a>
-<span class="sourceLineNo">960</span>      LOG.debug("Failed to load last flushed sequence id of regions"<a name="line.960"></a>
-<span class="sourceLineNo">961</span>          + " from file system", e);<a name="line.961"></a>
-<span class="sourceLineNo">962</span>    }<a name="line.962"></a>
-<span class="sourceLineNo">963</span>    // Set ourselves as active Master now our claim has succeeded up in zk.<a name="line.963"></a>
-<span class="sourceLineNo">964</span>    this.activeMaster = true;<a name="line.964"></a>
-<span class="sourceLineNo">965</span><a name="line.965"></a>
-<span class="sourceLineNo">966</span>    // This is for backwards compatibility<a name="line.966"></a>
-<span class="sourceLineNo">967</span>    // See HBASE-11393<a name="line.967"></a>
-<span class="sourceLineNo">968</span>    status.setStatus("Update TableCFs node in ZNode");<a name="line.968"></a>
-<span class="sourceLineNo">969</span>    ReplicationPeerConfigUpgrader tableCFsUpdater =<a name="line.969"></a>
-<span class="sourceLineNo">970</span>        new ReplicationPeerConfigUpgrader(zooKeeper, conf);<a name="line.970"></a>
-<span class="sourceLineNo">971</span>    tableCFsUpdater.copyTableCFs();<a name="line.971"></a>
-<span class="sourceLineNo">972</span><a name="line.972"></a>
-<span class="sourceLineNo">973</span>    // Add the Observer to delete quotas on table deletion before starting all CPs by<a name="line.973"></a>
-<span class="sourceLineNo">974</span>    // default with quota support, avoiding if user specifically asks to not load this Observer.<a name="line.974"></a>
-<span class="sourceLineNo">975</span>    if (QuotaUtil.isQuotaEnabled(conf)) {<a name="line.975"></a>
-<span class="sourceLineNo">976</span>      updateConfigurationForQuotasObserver(conf);<a name="line.976"></a>
-<span class="sourceLineNo">977</span>    }<a name="line.977"></a>
-<span class="sourceLineNo">978</span>    // initialize master side coprocessors before we start handling requests<a name="line.978"></a>
-<span class="sourceLineNo">979</span>    status.setStatus("Initializing master coprocessors");<a name="line.979"></a>
-<span class="sourceLineNo">980</span>    this.cpHost = new MasterCoprocessorHost(this, this.conf);<a name="line.980"></a>
+<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HBaseFsck;<a name="line.196"></a>
+<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.197"></a>
+<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.198"></a>
+<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.199"></a>
+<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.200"></a>
+<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.201"></a>
+<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.203"></a>
+<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.204"></a>
+<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.207"></a>
+<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.211"></a>
+<span class="sourceLineNo">212</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.212"></a>
+<span class="sourceLineNo">213</span>import org.apache.zookeeper.KeeperException;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.Server;<a name="line.214"></a>
+<span class="sourceLineNo">215</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.215"></a>
+<span class="sourceLineNo">216</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.216"></a>
+<span class="sourceLineNo">217</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>import org.slf4j.Logger;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>import org.slf4j.LoggerFactory;<a name="line.219"></a>
+<span class="sourceLineNo">220</span><a name="line.220"></a>
+<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.224"></a>
+<span class="sourceLineNo">225</span><a name="line.225"></a>
+<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.226"></a>
+<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.227"></a>
+<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.228"></a>
+<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>/**<a name="line.233"></a>
+<span class="sourceLineNo">234</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.234"></a>
+<span class="sourceLineNo">235</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.235"></a>
+<span class="sourceLineNo">236</span> * run the cluster.  All others park themselves in their constructor until<a name="line.236"></a>
+<span class="sourceLineNo">237</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.237"></a>
+<span class="sourceLineNo">238</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.238"></a>
+<span class="sourceLineNo">239</span> *<a name="line.239"></a>
+<span class="sourceLineNo">240</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.240"></a>
+<span class="sourceLineNo">241</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.241"></a>
+<span class="sourceLineNo">242</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.242"></a>
+<span class="sourceLineNo">243</span> *<a name="line.243"></a>
+<span class="sourceLineNo">244</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="l

<TRUNCATED>
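
For reference while reading the finishActiveMasterInitialization() hunk above: the method begins by starting a daemon "zombie detector" thread so that a hung initialization gets noticed and reported instead of silently stalling the master. A rough standalone sketch of that watchdog pattern follows; the class name, the latch-based signalling, and the timeout are illustrative assumptions, not HBase's actual InitializationMonitor.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class InitWatchdog implements Runnable {
  private final CountDownLatch initialized;
  private final long timeoutMillis;

  InitWatchdog(CountDownLatch initialized, long timeoutMillis) {
    this.initialized = initialized;
    this.timeoutMillis = timeoutMillis;
  }

  @Override
  public void run() {
    try {
      // Wait for the main thread to signal that initialization finished.
      if (!initialized.await(timeoutMillis, TimeUnit.MILLISECONDS)) {
        System.err.println("Initialization did not finish within "
            + timeoutMillis + " ms; possible zombie master.");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  public static void main(String[] args) throws Exception {
    CountDownLatch done = new CountDownLatch(1);
    Thread watchdog = new Thread(new InitWatchdog(done, 5_000L),
        "InitializationMonitor-" + System.currentTimeMillis());
    watchdog.setDaemon(true);   // a daemon thread never keeps the JVM alive by itself
    watchdog.start();
    // ... the actual initialization work would happen here ...
    done.countDown();           // signal success so the watchdog stays quiet
  }
}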

[32/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index 09c8ba2..ec6a588 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static interface <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4129">HBaseFsck.ErrorReporter</a></pre>
+<pre>public static interface <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4174">HBaseFsck.ErrorReporter</a></pre>
 </li>
 </ul>
 </div>
@@ -234,7 +234,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>clear</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4141">clear</a>()</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4186">clear</a>()</pre>
 </li>
 </ul>
 <a name="report-java.lang.String-">
@@ -243,7 +243,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>report</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4142">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4187">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
 <a name="reportError-java.lang.String-">
@@ -252,7 +252,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4143">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4188">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
 <a name="reportError-org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE-java.lang.String-">
@@ -261,7 +261,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4144">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4189">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
@@ -271,7 +271,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4145">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4190">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 </li>
@@ -282,7 +282,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4146">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4191">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info)</pre>
@@ -294,7 +294,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4147">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4192">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                  <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                  <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info1,
@@ -307,7 +307,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>summarize</h4>
-<pre>int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4154">summarize</a>()</pre>
+<pre>int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4199">summarize</a>()</pre>
 </li>
 </ul>
 <a name="detail-java.lang.String-">
@@ -316,7 +316,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>detail</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4155">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;details)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4200">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;details)</pre>
 </li>
 </ul>
 <a name="getErrorList--">
@@ -325,7 +325,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>getErrorList</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4156">getErrorList</a>()</pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4201">getErrorList</a>()</pre>
 </li>
 </ul>
 <a name="progress--">
@@ -334,7 +334,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>progress</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4157">progress</a>()</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4202">progress</a>()</pre>
 </li>
 </ul>
 <a name="print-java.lang.String-">
@@ -343,7 +343,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>print</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4158">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4203">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 </li>
 </ul>
 <a name="resetErrors--">
@@ -352,7 +352,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockList">
 <li class="blockList">
 <h4>resetErrors</h4>
-<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4159">resetErrors</a>()</pre>
+<pre>void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4204">resetErrors</a>()</pre>
 </li>
 </ul>
 <a name="tableHasErrors-org.apache.hadoop.hbase.util.HBaseFsck.TableInfo-">
@@ -361,7 +361,7 @@ var activeTableTab = "activeTableTab";
 <ul class="blockListLast">
 <li class="blockList">
 <h4>tableHasErrors</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4160">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#line.4205">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 </li>
 </ul>
 </li>
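
The ErrorReporter methods relocated above follow a collect-then-summarize shape: callers report() progress, reportError() each inconsistency they find, and summarize() at the end of a run. A minimal, HBase-free sketch of that shape follows; the class names are stand-ins and only a subset of the reportError overloads is mirrored.

import java.util.ArrayList;
import java.util.List;

interface Reporter {
  void clear();
  void report(String message);
  void reportError(String message);
  int summarize();
}

class PrintingReporter implements Reporter {
  private final List<String> errors = new ArrayList<>();

  @Override public void clear() { errors.clear(); }

  @Override public void report(String message) {
    System.out.println(message);                  // informational output only
  }

  @Override public void reportError(String message) {
    errors.add(message);                          // remember it for the summary
    System.err.println("ERROR: " + message);
  }

  @Override public int summarize() {
    System.out.println(errors.size() + " inconsistencies detected.");
    return errors.isEmpty() ? 0 : errors.size();  // non-zero signals problems
  }
}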

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index b6f87f8..d104a0a 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -18,7 +18,7 @@
     catch(err) {
     }
 //-->
-var methods = {"i0":10,"i1":10};
+var methods = {"i0":10,"i1":10,"i2":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.389">HBaseFsck.FileLockCallable</a>
+<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.418">HBaseFsck.FileLockCallable</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true" title="class or interface in java.util.concurrent">Callable</a>&lt;org.apache.hadoop.fs.FSDataOutputStream&gt;</pre>
 </li>
@@ -139,6 +139,14 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <th class="colLast" scope="col">Field and Description</th>
 </tr>
 <tr class="altColor">
+<td class="colFirst"><code>private org.apache.hadoop.conf.Configuration</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#conf">conf</a></span></code>&nbsp;</td>
+</tr>
+<tr class="rowColor">
+<td class="colFirst"><code>private org.apache.hadoop.fs.Path</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#hbckLockPath">hbckLockPath</a></span></code>&nbsp;</td>
+</tr>
+<tr class="altColor">
 <td class="colFirst"><code>(package private) <a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a></code></td>
 <td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#retryCounter">retryCounter</a></span></code>&nbsp;</td>
 </tr>
@@ -157,7 +165,8 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <th class="colOne" scope="col">Constructor and Description</th>
 </tr>
 <tr class="altColor">
-<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#FileLockCallable-org.apache.hadoop.hbase.util.RetryCounter-">FileLockCallable</a></span>(<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>&nbsp;</td>
+<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#FileLockCallable-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">FileLockCallable</a></span>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+                <a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</code>&nbsp;</td>
 </tr>
 </table>
 </li>
@@ -184,6 +193,10 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
                      org.apache.hadoop.fs.Path&nbsp;hbckLockFilePath,
                      org.apache.hadoop.fs.permission.FsPermission&nbsp;defaultPerms)</code>&nbsp;</td>
 </tr>
+<tr id="i2" class="altColor">
+<td class="colFirst"><code>(package private) org.apache.hadoop.fs.Path</code></td>
+<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#getHbckLockPath--">getHbckLockPath</a></span>()</code>&nbsp;</td>
+</tr>
 </table>
 <ul class="blockList">
 <li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
@@ -209,10 +222,28 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <a name="retryCounter">
 <!--   -->
 </a>
-<ul class="blockListLast">
+<ul class="blockList">
 <li class="blockList">
 <h4>retryCounter</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.390">retryCounter</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.419">retryCounter</a></pre>
+</li>
+</ul>
+<a name="conf">
+<!--   -->
+</a>
+<ul class="blockList">
+<li class="blockList">
+<h4>conf</h4>
+<pre>private final&nbsp;org.apache.hadoop.conf.Configuration <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.420">conf</a></pre>
+</li>
+</ul>
+<a name="hbckLockPath">
+<!--   -->
+</a>
+<ul class="blockListLast">
+<li class="blockList">
+<h4>hbckLockPath</h4>
+<pre>private&nbsp;org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.421">hbckLockPath</a></pre>
 </li>
 </ul>
 </li>
@@ -223,13 +254,14 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <!--   -->
 </a>
 <h3>Constructor Detail</h3>
-<a name="FileLockCallable-org.apache.hadoop.hbase.util.RetryCounter-">
+<a name="FileLockCallable-org.apache.hadoop.conf.Configuration-org.apache.hadoop.hbase.util.RetryCounter-">
 <!--   -->
 </a>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>FileLockCallable</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.392">FileLockCallable</a>(<a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</pre>
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.423">FileLockCallable</a>(org.apache.hadoop.conf.Configuration&nbsp;conf,
+                        <a href="../../../../../org/apache/hadoop/hbase/util/RetryCounter.html" title="class in org.apache.hadoop.hbase.util">RetryCounter</a>&nbsp;retryCounter)</pre>
 </li>
 </ul>
 </li>
@@ -240,13 +272,26 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <!--   -->
 </a>
 <h3>Method Detail</h3>
+<a name="getHbckLockPath--">
+<!--   -->
+</a>
+<ul class="blockList">
+<li class="blockList">
+<h4>getHbckLockPath</h4>
+<pre>org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.431">getHbckLockPath</a>()</pre>
+<dl>
+<dt><span class="returnLabel">Returns:</span></dt>
+<dd>Will be <code>null</code> unless you call <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#call--"><code>call()</code></a></dd>
+</dl>
+</li>
+</ul>
 <a name="call--">
 <!--   -->
 </a>
 <ul class="blockList">
 <li class="blockList">
 <h4>call</h4>
-<pre>public&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.396">call</a>()
+<pre>public&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.436">call</a>()
                                              throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -262,7 +307,7 @@ implements <a href="https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 <ul class="blockListLast">
 <li class="blockList">
 <h4>createFileWithRetries</h4>
-<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.417">createFileWithRetries</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
+<pre>private&nbsp;org.apache.hadoop.fs.FSDataOutputStream&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html#line.461">createFileWithRetries</a>(org.apache.hadoop.fs.FileSystem&nbsp;fs,
                                                                       org.apache.hadoop.fs.Path&nbsp;hbckLockFilePath,
                                                                       org.apache.hadoop.fs.permission.FsPermission&nbsp;defaultPerms)
                                                                throws <a href="https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true" title="class or interface in java.io">IOException</a></pre>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index 74fd1fe..ae195fe 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4897">HBaseFsck.HBaseFsckTool</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4942">HBaseFsck.HBaseFsckTool</a>
 extends org.apache.hadoop.conf.Configured
 implements org.apache.hadoop.util.Tool</pre>
 <div class="block">This is a Tool wrapper that gathers -Dxxx=yyy configuration settings from the command line.</div>
@@ -207,7 +207,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HBaseFsckTool</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4898">HBaseFsckTool</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4943">HBaseFsckTool</a>(org.apache.hadoop.conf.Configuration&nbsp;conf)</pre>
 </li>
 </ul>
 </li>
@@ -224,7 +224,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>run</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4900">run</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html#line.4945">run</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index 785b6d1..7b862f1 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3868">HBaseFsck.HbckInfo</a>
+<pre>public static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3913">HBaseFsck.HbckInfo</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" title="interface in org.apache.hadoop.hbase.util">KeyRange</a></pre>
 <div class="block">Maintain information about a particular region.  It gathers information
@@ -305,7 +305,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>metaEntry</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3869">metaEntry</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3914">metaEntry</a></pre>
 </li>
 </ul>
 <a name="hdfsEntry">
@@ -314,7 +314,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsEntry</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HdfsEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3870">hdfsEntry</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HdfsEntry</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3915">hdfsEntry</a></pre>
 </li>
 </ul>
 <a name="deployedEntries">
@@ -323,7 +323,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedEntries</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.OnlineEntry</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3871">deployedEntries</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.OnlineEntry</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3916">deployedEntries</a></pre>
 </li>
 </ul>
 <a name="deployedOn">
@@ -332,7 +332,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedOn</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3872">deployedOn</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true" title="class or interface in java.util">List</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3917">deployedOn</a></pre>
 </li>
 </ul>
 <a name="skipChecks">
@@ -341,7 +341,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>skipChecks</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3873">skipChecks</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3918">skipChecks</a></pre>
 </li>
 </ul>
 <a name="isMerged">
@@ -350,7 +350,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>isMerged</h4>
-<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3874">isMerged</a></pre>
+<pre>private&nbsp;boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3919">isMerged</a></pre>
 </li>
 </ul>
 <a name="deployedReplicaId">
@@ -359,7 +359,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>deployedReplicaId</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3875">deployedReplicaId</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3920">deployedReplicaId</a></pre>
 </li>
 </ul>
 <a name="primaryHRIForDeployedReplica">
@@ -368,7 +368,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockListLast">
 <li class="blockList">
 <h4>primaryHRIForDeployedReplica</h4>
-<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3876">primaryHRIForDeployedReplica</a></pre>
+<pre>private&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3921">primaryHRIForDeployedReplica</a></pre>
 </li>
 </ul>
 </li>
@@ -385,7 +385,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HbckInfo</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3878">HbckInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a>&nbsp;metaEntry)</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3923">HbckInfo</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.MetaEntry</a>&nbsp;metaEntry)</pre>
 </li>
 </ul>
 </li>
@@ -402,7 +402,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getReplicaId</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3882">getReplicaId</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3927">getReplicaId</a>()</pre>
 </li>
 </ul>
 <a name="addServer-org.apache.hadoop.hbase.client.RegionInfo-org.apache.hadoop.hbase.ServerName-">
@@ -411,7 +411,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>addServer</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3886">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3931">addServer</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;hri,
                       <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;server)</pre>
 </li>
 </ul>
@@ -421,7 +421,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3899">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3944">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>
@@ -434,7 +434,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getStartKey</h4>
-<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3911">getStartKey</a>()</pre>
+<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3956">getStartKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html#getStartKey--">getStartKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" title="interface in org.apache.hadoop.hbase.util">KeyRange</a></code></dd>
@@ -447,7 +447,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getEndKey</h4>
-<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3923">getEndKey</a>()</pre>
+<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3968">getEndKey</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html#getEndKey--">getEndKey</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" title="interface in org.apache.hadoop.hbase.util">KeyRange</a></code></dd>
@@ -460,7 +460,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getTableName</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3934">getTableName</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/TableName.html" title="class in org.apache.hadoop.hbase">TableName</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3979">getTableName</a>()</pre>
 </li>
 </ul>
 <a name="getRegionNameAsString--">
@@ -469,7 +469,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionNameAsString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3951">getRegionNameAsString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3996">getRegionNameAsString</a>()</pre>
 </li>
 </ul>
 <a name="getRegionName--">
@@ -478,7 +478,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getRegionName</h4>
-<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3967">getRegionName</a>()</pre>
+<pre>public&nbsp;byte[]&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4012">getRegionName</a>()</pre>
 </li>
 </ul>
 <a name="getPrimaryHRIForDeployedReplica--">
@@ -487,7 +487,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getPrimaryHRIForDeployedReplica</h4>
-<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3981">getPrimaryHRIForDeployedReplica</a>()</pre>
+<pre>public&nbsp;<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4026">getPrimaryHRIForDeployedReplica</a>()</pre>
 </li>
 </ul>
 <a name="getHdfsRegionDir--">
@@ -496,7 +496,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getHdfsRegionDir</h4>
-<pre>org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3985">getHdfsRegionDir</a>()</pre>
+<pre>org.apache.hadoop.fs.Path&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4030">getHdfsRegionDir</a>()</pre>
 </li>
 </ul>
 <a name="containsOnlyHdfsEdits--">
@@ -505,7 +505,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>containsOnlyHdfsEdits</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3992">containsOnlyHdfsEdits</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4037">containsOnlyHdfsEdits</a>()</pre>
 </li>
 </ul>
 <a name="isHdfsRegioninfoPresent--">
@@ -514,7 +514,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>isHdfsRegioninfoPresent</h4>
-<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.3999">isHdfsRegioninfoPresent</a>()</pre>
+<pre>boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4044">isHdfsRegioninfoPresent</a>()</pre>
 </li>
 </ul>
 <a name="getModTime--">
@@ -523,7 +523,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getModTime</h4>
-<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4006">getModTime</a>()</pre>
+<pre>long&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4051">getModTime</a>()</pre>
 </li>
 </ul>
 <a name="getHdfsHRI--">
@@ -532,7 +532,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>getHdfsHRI</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4013">getHdfsHRI</a>()</pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4058">getHdfsHRI</a>()</pre>
 </li>
 </ul>
 <a name="setSkipChecks-boolean-">
@@ -541,7 +541,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>setSkipChecks</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4020">setSkipChecks</a>(boolean&nbsp;skipChecks)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4065">setSkipChecks</a>(boolean&nbsp;skipChecks)</pre>
 </li>
 </ul>
 <a name="isSkipChecks--">
@@ -550,7 +550,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>isSkipChecks</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4024">isSkipChecks</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4069">isSkipChecks</a>()</pre>
 </li>
 </ul>
 <a name="setMerged-boolean-">
@@ -559,7 +559,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockList">
 <li class="blockList">
 <h4>setMerged</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4028">setMerged</a>(boolean&nbsp;isMerged)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4073">setMerged</a>(boolean&nbsp;isMerged)</pre>
 </li>
 </ul>
 <a name="isMerged--">
@@ -568,7 +568,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/KeyRange.html" t
 <ul class="blockListLast">
 <li class="blockList">
 <h4>isMerged</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4032">isMerged</a>()</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html#line.4077">isMerged</a>()</pre>
 </li>
 </ul>
 </li>
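
The HbckInfo accessors above (getHdfsRegionDir(), getModTime(), isSkipChecks(), isMerged(), and friends) make up the per-region record that the consistency checks walk over. A minimal sketch of how such flags might be consulted follows; the RegionEntry type and the skip rule are illustrative assumptions, not the HBaseFsck code itself.

// Sketch only: a simplified stand-in for an hbck-style per-region record.
// Field names mirror the accessors listed above; the skip semantics are assumed.
import java.util.ArrayList;
import java.util.List;

class RegionCheckSketch {
  static final class RegionEntry {
    final byte[] regionName;
    boolean skipChecks;   // analogous to setSkipChecks()/isSkipChecks()
    boolean merged;       // analogous to setMerged()/isMerged()
    RegionEntry(byte[] regionName) { this.regionName = regionName; }
  }

  // Collect only the entries that still need checking, skipping ones that were
  // already handled (e.g. folded into a merge) or explicitly excluded.
  static List<RegionEntry> toCheck(List<RegionEntry> all) {
    List<RegionEntry> out = new ArrayList<>();
    for (RegionEntry e : all) {
      if (e.skipChecks || e.merged) {
        continue; // assumed semantics: flagged entries are not re-checked
      }
      out.add(e);
    }
    return out;
  }
}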

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index 348a72a..4c74fed 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -107,7 +107,7 @@
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3843">HBaseFsck.HdfsEntry</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3888">HBaseFsck.HdfsEntry</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 <div class="block">Stores the regioninfo entries from HDFS</div>
 </li>
@@ -201,7 +201,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hri</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3844">hri</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3889">hri</a></pre>
 </li>
 </ul>
 <a name="hdfsRegionDir">
@@ -210,7 +210,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsRegionDir</h4>
-<pre>org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3845">hdfsRegionDir</a></pre>
+<pre>org.apache.hadoop.fs.Path <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3890">hdfsRegionDir</a></pre>
 </li>
 </ul>
 <a name="hdfsRegionDirModTime">
@@ -219,7 +219,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsRegionDirModTime</h4>
-<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3846">hdfsRegionDirModTime</a></pre>
+<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3891">hdfsRegionDirModTime</a></pre>
 </li>
 </ul>
 <a name="hdfsRegioninfoFilePresent">
@@ -228,7 +228,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hdfsRegioninfoFilePresent</h4>
-<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3847">hdfsRegioninfoFilePresent</a></pre>
+<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3892">hdfsRegioninfoFilePresent</a></pre>
 </li>
 </ul>
 <a name="hdfsOnlyEdits">
@@ -237,7 +237,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hdfsOnlyEdits</h4>
-<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3848">hdfsOnlyEdits</a></pre>
+<pre>boolean <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3893">hdfsOnlyEdits</a></pre>
 </li>
 </ul>
 </li>
@@ -254,7 +254,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>HdfsEntry</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3843">HdfsEntry</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html#line.3888">HdfsEntry</a>()</pre>
 </li>
 </ul>
 </li>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index 9e23565..c3e22ec 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3792">HBaseFsck.MetaEntry</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3837">HBaseFsck.MetaEntry</a>
 extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</a></pre>
 <div class="block">Stores the regioninfo entries scanned from META</div>
 </li>
@@ -264,7 +264,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>regionServer</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3793">regionServer</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3838">regionServer</a></pre>
 </li>
 </ul>
 <a name="modTime">
@@ -273,7 +273,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>modTime</h4>
-<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3794">modTime</a></pre>
+<pre>long <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3839">modTime</a></pre>
 </li>
 </ul>
 <a name="splitA">
@@ -282,7 +282,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>splitA</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3795">splitA</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3840">splitA</a></pre>
 </li>
 </ul>
 <a name="splitB">
@@ -291,7 +291,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockListLast">
 <li class="blockList">
 <h4>splitB</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3795">splitB</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3840">splitB</a></pre>
 </li>
 </ul>
 </li>
@@ -308,7 +308,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>MetaEntry</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3797">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3842">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
                  <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;regionServer,
                  long&nbsp;modTime)</pre>
 </li>
@@ -319,7 +319,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockListLast">
 <li class="blockList">
 <h4>MetaEntry</h4>
-<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3801">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
+<pre>public&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3846">MetaEntry</a>(<a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;rinfo,
                  <a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a>&nbsp;regionServer,
                  long&nbsp;modTime,
                  <a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a>&nbsp;splitA,
@@ -340,7 +340,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockList">
 <li class="blockList">
 <h4>equals</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3811">equals</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>&nbsp;o)</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3856">equals</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>&nbsp;o)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html#equals-java.lang.Object-">equals</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</a></code></dd>
@@ -355,7 +355,7 @@ extends <a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title=
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hashCode</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3825">hashCode</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html#line.3870">hashCode</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html#hashCode--">hashCode</a></code>&nbsp;in class&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</a></code></dd>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index 93b3f15..03f5fdc 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3854">HBaseFsck.OnlineEntry</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.3899">HBaseFsck.OnlineEntry</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 <div class="block">Stores the regioninfo retrieved from Online region servers.</div>
 </li>
@@ -206,7 +206,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>hri</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3855">hri</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/client/RegionInfo.html" title="interface in org.apache.hadoop.hbase.client">RegionInfo</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3900">hri</a></pre>
 </li>
 </ul>
 <a name="hsa">
@@ -215,7 +215,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>hsa</h4>
-<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3856">hsa</a></pre>
+<pre><a href="../../../../../org/apache/hadoop/hbase/ServerName.html" title="class in org.apache.hadoop.hbase">ServerName</a> <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3901">hsa</a></pre>
 </li>
 </ul>
 </li>
@@ -232,7 +232,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>OnlineEntry</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3854">OnlineEntry</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3899">OnlineEntry</a>()</pre>
 </li>
 </ul>
 </li>
@@ -249,7 +249,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3859">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html#line.3904">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>
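
Taken together, HdfsEntry (the .regioninfo data found on HDFS), MetaEntry (the row scanned from hbase:meta) and OnlineEntry (what a region server reports as deployed) are the three views of a region that hbck reconciles. A rough sketch of that kind of three-way comparison follows; the record and method names are invented stand-ins, not the real classes.

// Sketch only: compares three assumed views of one region and reports which are missing.
import java.util.ArrayList;
import java.util.List;

class RegionViewsSketch {
  static final class Views {
    String regionEncodedName;
    boolean inMeta;        // a row with region info was found in hbase:meta (cf. MetaEntry)
    boolean onHdfs;        // a .regioninfo file exists under the region dir (cf. HdfsEntry)
    String deployedOn;     // server reporting the region online, or null (cf. OnlineEntry)
  }

  static List<String> inconsistencies(Views v) {
    List<String> problems = new ArrayList<>();
    if (!v.inMeta) problems.add("not in hbase:meta");
    if (!v.onHdfs) problems.add("no .regioninfo on HDFS");
    if (v.deployedOn == null) problems.add("not deployed on any region server");
    return problems;
  }

  public static void main(String[] args) {
    Views v = new Views();
    v.regionEncodedName = "abc123";
    v.inMeta = true;
    v.onHdfs = true;
    v.deployedOn = null; // e.g. the region is closed everywhere
    System.out.println(v.regionEncodedName + " -> " + inconsistencies(v));
  }
}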

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index b631494..47b9a9e 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4163">HBaseFsck.PrintingErrorReporter</a>
+<pre>static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.4208">HBaseFsck.PrintingErrorReporter</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a>
 implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></pre>
 </li>
@@ -301,7 +301,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>errorCount</h4>
-<pre>public&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4164">errorCount</a></pre>
+<pre>public&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4209">errorCount</a></pre>
 </li>
 </ul>
 <a name="showProgress">
@@ -310,7 +310,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>showProgress</h4>
-<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4165">showProgress</a></pre>
+<pre>private&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4210">showProgress</a></pre>
 </li>
 </ul>
 <a name="progressThreshold">
@@ -319,7 +319,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>progressThreshold</h4>
-<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4167">progressThreshold</a></pre>
+<pre>private static final&nbsp;int <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4212">progressThreshold</a></pre>
 <dl>
 <dt><span class="seeLabel">See Also:</span></dt>
 <dd><a href="../../../../../constant-values.html#org.apache.hadoop.hbase.util.HBaseFsck.PrintingErrorReporter.progressThreshold">Constant Field Values</a></dd>
@@ -332,7 +332,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>errorTables</h4>
-<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4169">errorTables</a></pre>
+<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true" title="class or interface in java.util">Set</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4214">errorTables</a></pre>
 </li>
 </ul>
 <a name="errorList">
@@ -341,7 +341,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockListLast">
 <li class="blockList">
 <h4>errorList</h4>
-<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4172">errorList</a></pre>
+<pre>private&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt; <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4217">errorList</a></pre>
 </li>
 </ul>
 </li>
@@ -358,7 +358,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockListLast">
 <li class="blockList">
 <h4>PrintingErrorReporter</h4>
-<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4163">PrintingErrorReporter</a>()</pre>
+<pre><a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4208">PrintingErrorReporter</a>()</pre>
 </li>
 </ul>
 </li>
@@ -375,7 +375,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>clear</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4175">clear</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4220">clear</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#clear--">clear</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -388,7 +388,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4182">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4227">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
@@ -402,7 +402,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4197">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4242">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 <dl>
@@ -417,7 +417,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4203">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4248">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info)</pre>
@@ -433,7 +433,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4211">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4256">reportError</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&nbsp;errorCode,
                         <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table,
                         <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.HbckInfo</a>&nbsp;info1,
@@ -450,7 +450,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>reportError</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4220">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4265">reportError</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#reportError-java.lang.String-">reportError</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -463,7 +463,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>report</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4230">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4275">report</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <div class="block">Report error information, but do not increment the error count.  Intended for cases
  where the actual error would have been reported previously.</div>
 <dl>
@@ -480,7 +480,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>summarize</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4238">summarize</a>()</pre>
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4283">summarize</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#summarize--">summarize</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -493,7 +493,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>getErrorList</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4251">getErrorList</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true" title="class or interface in java.util">ArrayList</a>&lt;<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html" title="enum in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter.ERROR_CODE</a>&gt;&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4296">getErrorList</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#getErrorList--">getErrorList</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -506,7 +506,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>print</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4256">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4301">print</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#print-java.lang.String-">print</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -519,7 +519,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>tableHasErrors</h4>
-<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4263">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
+<pre>public&nbsp;boolean&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4308">tableHasErrors</a>(<a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html" title="class in org.apache.hadoop.hbase.util">HBaseFsck.TableInfo</a>&nbsp;table)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#tableHasErrors-org.apache.hadoop.hbase.util.HBaseFsck.TableInfo-">tableHasErrors</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -532,7 +532,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>resetErrors</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4268">resetErrors</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4313">resetErrors</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#resetErrors--">resetErrors</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -545,7 +545,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockList">
 <li class="blockList">
 <h4>detail</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4273">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4318">detail</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;message)</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#detail-java.lang.String-">detail</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
@@ -558,7 +558,7 @@ implements <a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorR
 <ul class="blockListLast">
 <li class="blockList">
 <h4>progress</h4>
-<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4281">progress</a>()</pre>
+<pre>public&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html#line.4326">progress</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>
 <dd><code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html#progress--">progress</a></code>&nbsp;in interface&nbsp;<code><a href="../../../../../org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html" title="interface in org.apache.hadoop.hbase.util">HBaseFsck.ErrorReporter</a></code></dd>
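
PrintingErrorReporter above distinguishes reportError(...) (records and prints an error), report(...) (prints without incrementing the count, per its javadoc), and progress() (output throttled by progressThreshold). A small reporter in the same spirit, with assumed names and an assumed threshold value, could look like this:

// Sketch of a PrintingErrorReporter-like reporter; names and the throttle rule are assumptions.
class SimpleReporterSketch {
  private int errorCount = 0;
  private int sinceLastDot = 0;
  private static final int PROGRESS_THRESHOLD = 100; // assumed value, not HBaseFsck's constant

  // Record and print an error, bumping the count.
  void reportError(String message) {
    errorCount++;
    System.out.println("ERROR: " + message);
  }

  // Print an error message without counting it again (the error was counted earlier).
  void report(String message) {
    System.out.println("ERROR: " + message);
  }

  // Emit a progress marker only every PROGRESS_THRESHOLD calls to avoid log spam.
  void progress() {
    if (++sinceLastDot >= PROGRESS_THRESHOLD) {
      sinceLastDot = 0;
      System.out.print(".");
    }
  }

  // Print a summary line and return the total number of errors seen.
  int summarize() {
    System.out.println("Summary: " + errorCount + " inconsistencies detected.");
    return errorCount;
  }
}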

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index 2693c0e..71e0d0d 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 </dl>
 <hr>
 <br>
-<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.839">HBaseFsck.RegionBoundariesInformation</a>
+<pre>private static class <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.html#line.884">HBaseFsck.RegionBoundariesInformation</a>
 extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></pre>
 </li>
 </ul>
@@ -219,7 +219,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>regionName</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.840">regionName</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.885">regionName</a></pre>
 </li>
 </ul>
 <a name="metaFirstKey">
@@ -228,7 +228,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>metaFirstKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.841">metaFirstKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.886">metaFirstKey</a></pre>
 </li>
 </ul>
 <a name="metaLastKey">
@@ -237,7 +237,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>metaLastKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.842">metaLastKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.887">metaLastKey</a></pre>
 </li>
 </ul>
 <a name="storesFirstKey">
@@ -246,7 +246,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockList">
 <li class="blockList">
 <h4>storesFirstKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.843">storesFirstKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.888">storesFirstKey</a></pre>
 </li>
 </ul>
 <a name="storesLastKey">
@@ -255,7 +255,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>storesLastKey</h4>
-<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.844">storesLastKey</a></pre>
+<pre>public&nbsp;byte[] <a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.889">storesLastKey</a></pre>
 </li>
 </ul>
 </li>
@@ -272,7 +272,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>RegionBoundariesInformation</h4>
-<pre>private&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.839">RegionBoundariesInformation</a>()</pre>
+<pre>private&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.884">RegionBoundariesInformation</a>()</pre>
 </li>
 </ul>
 </li>
@@ -289,7 +289,7 @@ extends <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 <ul class="blockListLast">
 <li class="blockList">
 <h4>toString</h4>
-<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.846">toString</a>()</pre>
+<pre>public&nbsp;<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html#line.891">toString</a>()</pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Overrides:</span></dt>
 <dd><code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code>&nbsp;in class&nbsp;<code><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></code></dd>
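
RegionBoundariesInformation pairs the boundaries recorded in meta (metaFirstKey/metaLastKey) with the first and last keys actually observed in the region's store files (storesFirstKey/storesLastKey), which is the raw material for checking that store data stays inside the region's advertised range. A hedged sketch of that comparison follows; the helper is illustrative and not the HBaseFsck implementation.

// Sketch only: checks that observed store-file keys stay inside the boundaries from meta.
// An empty byte[] end key is treated as "unbounded", following HBase row-range conventions.
class BoundaryCheckSketch {
  // Lexicographic, unsigned byte comparison (a hand-written stand-in for Bytes.compareTo).
  static int compare(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  // True when the keys seen in the stores fit within the [metaFirstKey, metaLastKey) range.
  static boolean withinBoundaries(byte[] metaFirstKey, byte[] metaLastKey,
                                  byte[] storesFirstKey, byte[] storesLastKey) {
    boolean startOk = compare(storesFirstKey, metaFirstKey) >= 0;
    boolean endOk = metaLastKey.length == 0            // empty end key = last region of the table
        || compare(storesLastKey, metaLastKey) <= 0;
    return startOk && endOk;
  }

  public static void main(String[] args) {
    byte[] metaFirst = "aaa".getBytes();
    byte[] metaLast = "mmm".getBytes();
    System.out.println(withinBoundaries(metaFirst, metaLast,
        "abc".getBytes(), "lzz".getBytes())); // true: store keys inside the meta range
    System.out.println(withinBoundaries(metaFirst, metaLast,
        "abc".getBytes(), "zzz".getBytes())); // false: a store key lies past the meta end key
  }
}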


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      Pair&lt;Path, FSD

<TRUNCATED>

[27/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 7df71bd..a990153 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -201,3634 +201,3643 @@
 <span class="sourceLineNo">193</span>import org.apache.hadoop.hbase.util.Bytes;<a name="line.193"></a>
 <span class="sourceLineNo">194</span>import org.apache.hadoop.hbase.util.CompressionTest;<a name="line.194"></a>
 <span class="sourceLineNo">195</span>import org.apache.hadoop.hbase.util.EncryptionTest;<a name="line.195"></a>
-<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.196"></a>
-<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.197"></a>
-<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.198"></a>
-<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.199"></a>
-<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.200"></a>
-<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.201"></a>
-<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.202"></a>
-<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.203"></a>
-<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.204"></a>
-<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.205"></a>
-<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.206"></a>
-<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.207"></a>
-<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.208"></a>
-<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.209"></a>
-<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.210"></a>
-<span class="sourceLineNo">211</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.211"></a>
-<span class="sourceLineNo">212</span>import org.apache.zookeeper.KeeperException;<a name="line.212"></a>
-<span class="sourceLineNo">213</span>import org.eclipse.jetty.server.Server;<a name="line.213"></a>
-<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.215"></a>
-<span class="sourceLineNo">216</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>import org.slf4j.Logger;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>import org.slf4j.LoggerFactory;<a name="line.218"></a>
-<span class="sourceLineNo">219</span><a name="line.219"></a>
-<span class="sourceLineNo">220</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.220"></a>
-<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.222"></a>
-<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.223"></a>
-<span class="sourceLineNo">224</span><a name="line.224"></a>
-<span class="sourceLineNo">225</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.225"></a>
-<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.226"></a>
-<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.227"></a>
-<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.228"></a>
-<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.230"></a>
-<span class="sourceLineNo">231</span><a name="line.231"></a>
-<span class="sourceLineNo">232</span>/**<a name="line.232"></a>
-<span class="sourceLineNo">233</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.233"></a>
-<span class="sourceLineNo">234</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.234"></a>
-<span class="sourceLineNo">235</span> * run the cluster.  All others park themselves in their constructor until<a name="line.235"></a>
-<span class="sourceLineNo">236</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.236"></a>
-<span class="sourceLineNo">237</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.237"></a>
-<span class="sourceLineNo">238</span> *<a name="line.238"></a>
-<span class="sourceLineNo">239</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.239"></a>
-<span class="sourceLineNo">240</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.240"></a>
-<span class="sourceLineNo">241</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.241"></a>
-<span class="sourceLineNo">242</span> *<a name="line.242"></a>
-<span class="sourceLineNo">243</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a name="line.243"></a>
-<span class="sourceLineNo">244</span> *<a name="line.244"></a>
-<span class="sourceLineNo">245</span> * @see org.apache.zookeeper.Watcher<a name="line.245"></a>
-<span class="sourceLineNo">246</span> */<a name="line.246"></a>
-<span class="sourceLineNo">247</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.247"></a>
-<span class="sourceLineNo">248</span>@SuppressWarnings("deprecation")<a name="line.248"></a>
-<span class="sourceLineNo">249</span>public class HMaster extends HRegionServer implements MasterServices {<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  private static Logger LOG = LoggerFactory.getLogger(HMaster.class);<a name="line.250"></a>
-<span class="sourceLineNo">251</span><a name="line.251"></a>
-<span class="sourceLineNo">252</span>  /**<a name="line.252"></a>
-<span class="sourceLineNo">253</span>   * Protection against zombie master. Started once Master accepts active responsibility and<a name="line.253"></a>
-<span class="sourceLineNo">254</span>   * starts taking over responsibilities. Allows a finite time window before giving up ownership.<a name="line.254"></a>
-<span class="sourceLineNo">255</span>   */<a name="line.255"></a>
-<span class="sourceLineNo">256</span>  private static class InitializationMonitor extends HasThread {<a name="line.256"></a>
-<span class="sourceLineNo">257</span>    /** The amount of time in milliseconds to sleep before checking initialization status. */<a name="line.257"></a>
-<span class="sourceLineNo">258</span>    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";<a name="line.258"></a>
-<span class="sourceLineNo">259</span>    public static final long TIMEOUT_DEFAULT = TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);<a name="line.259"></a>
-<span class="sourceLineNo">260</span><a name="line.260"></a>
-<span class="sourceLineNo">261</span>    /**<a name="line.261"></a>
-<span class="sourceLineNo">262</span>     * When timeout expired and initialization has not complete, call {@link System#exit(int)} when<a name="line.262"></a>
-<span class="sourceLineNo">263</span>     * true, do nothing otherwise.<a name="line.263"></a>
-<span class="sourceLineNo">264</span>     */<a name="line.264"></a>
-<span class="sourceLineNo">265</span>    public static final String HALT_KEY = "hbase.master.initializationmonitor.haltontimeout";<a name="line.265"></a>
-<span class="sourceLineNo">266</span>    public static final boolean HALT_DEFAULT = false;<a name="line.266"></a>
-<span class="sourceLineNo">267</span><a name="line.267"></a>
-<span class="sourceLineNo">268</span>    private final HMaster master;<a name="line.268"></a>
-<span class="sourceLineNo">269</span>    private final long timeout;<a name="line.269"></a>
-<span class="sourceLineNo">270</span>    private final boolean haltOnTimeout;<a name="line.270"></a>
-<span class="sourceLineNo">271</span><a name="line.271"></a>
-<span class="sourceLineNo">272</span>    /** Creates a Thread that monitors the {@link #isInitialized()} state. */<a name="line.272"></a>
-<span class="sourceLineNo">273</span>    InitializationMonitor(HMaster master) {<a name="line.273"></a>
-<span class="sourceLineNo">274</span>      super("MasterInitializationMonitor");<a name="line.274"></a>
-<span class="sourceLineNo">275</span>      this.master = master;<a name="line.275"></a>
-<span class="sourceLineNo">276</span>      this.timeout = master.getConfiguration().getLong(TIMEOUT_KEY, TIMEOUT_DEFAULT);<a name="line.276"></a>
-<span class="sourceLineNo">277</span>      this.haltOnTimeout = master.getConfiguration().getBoolean(HALT_KEY, HALT_DEFAULT);<a name="line.277"></a>
-<span class="sourceLineNo">278</span>      this.setDaemon(true);<a name="line.278"></a>
-<span class="sourceLineNo">279</span>    }<a name="line.279"></a>
-<span class="sourceLineNo">280</span><a name="line.280"></a>
-<span class="sourceLineNo">281</span>    @Override<a name="line.281"></a>
-<span class="sourceLineNo">282</span>    public void run() {<a name="line.282"></a>
-<span class="sourceLineNo">283</span>      try {<a name="line.283"></a>
-<span class="sourceLineNo">284</span>        while (!master.isStopped() &amp;&amp; master.isActiveMaster()) {<a name="line.284"></a>
-<span class="sourceLineNo">285</span>          Thread.sleep(timeout);<a name="line.285"></a>
-<span class="sourceLineNo">286</span>          if (master.isInitialized()) {<a name="line.286"></a>
-<span class="sourceLineNo">287</span>            LOG.debug("Initialization completed within allotted tolerance. Monitor exiting.");<a name="line.287"></a>
-<span class="sourceLineNo">288</span>          } else {<a name="line.288"></a>
-<span class="sourceLineNo">289</span>            LOG.error("Master failed to complete initialization after " + timeout + "ms. Please"<a name="line.289"></a>
-<span class="sourceLineNo">290</span>                + " consider submitting a bug report including a thread dump of this process.");<a name="line.290"></a>
-<span class="sourceLineNo">291</span>            if (haltOnTimeout) {<a name="line.291"></a>
-<span class="sourceLineNo">292</span>              LOG.error("Zombie Master exiting. Thread dump to stdout");<a name="line.292"></a>
-<span class="sourceLineNo">293</span>              Threads.printThreadInfo(System.out, "Zombie HMaster");<a name="line.293"></a>
-<span class="sourceLineNo">294</span>              System.exit(-1);<a name="line.294"></a>
-<span class="sourceLineNo">295</span>            }<a name="line.295"></a>
-<span class="sourceLineNo">296</span>          }<a name="line.296"></a>
-<span class="sourceLineNo">297</span>        }<a name="line.297"></a>
-<span class="sourceLineNo">298</span>      } catch (InterruptedException ie) {<a name="line.298"></a>
-<span class="sourceLineNo">299</span>        LOG.trace("InitMonitor thread interrupted. Existing.");<a name="line.299"></a>
-<span class="sourceLineNo">300</span>      }<a name="line.300"></a>
-<span class="sourceLineNo">301</span>    }<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  }<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  // MASTER is name of the webapp and the attribute name used stuffing this<a name="line.304"></a>
-<span class="sourceLineNo">305</span>  //instance into web context.<a name="line.305"></a>
-<span class="sourceLineNo">306</span>  public static final String MASTER = "master";<a name="line.306"></a>
-<span class="sourceLineNo">307</span><a name="line.307"></a>
-<span class="sourceLineNo">308</span>  // Manager and zk listener for master election<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private final ActiveMasterManager activeMasterManager;<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Region server tracker<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private RegionServerTracker regionServerTracker;<a name="line.311"></a>
-<span class="sourceLineNo">312</span>  // Draining region server tracker<a name="line.312"></a>
-<span class="sourceLineNo">313</span>  private DrainingServerTracker drainingServerTracker;<a name="line.313"></a>
-<span class="sourceLineNo">314</span>  // Tracker for load balancer state<a name="line.314"></a>
-<span class="sourceLineNo">315</span>  LoadBalancerTracker loadBalancerTracker;<a name="line.315"></a>
-<span class="sourceLineNo">316</span>  // Tracker for meta location, if any client ZK quorum specified<a name="line.316"></a>
-<span class="sourceLineNo">317</span>  MetaLocationSyncer metaLocationSyncer;<a name="line.317"></a>
-<span class="sourceLineNo">318</span>  // Tracker for active master location, if any client ZK quorum specified<a name="line.318"></a>
-<span class="sourceLineNo">319</span>  MasterAddressSyncer masterAddressSyncer;<a name="line.319"></a>
-<span class="sourceLineNo">320</span><a name="line.320"></a>
-<span class="sourceLineNo">321</span>  // Tracker for split and merge state<a name="line.321"></a>
-<span class="sourceLineNo">322</span>  private SplitOrMergeTracker splitOrMergeTracker;<a name="line.322"></a>
-<span class="sourceLineNo">323</span><a name="line.323"></a>
-<span class="sourceLineNo">324</span>  // Tracker for region normalizer state<a name="line.324"></a>
-<span class="sourceLineNo">325</span>  private RegionNormalizerTracker regionNormalizerTracker;<a name="line.325"></a>
-<span class="sourceLineNo">326</span><a name="line.326"></a>
-<span class="sourceLineNo">327</span>  //Tracker for master maintenance mode setting<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private MasterMaintenanceModeTracker maintenanceModeTracker;<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private ClusterSchemaService clusterSchemaService;<a name="line.330"></a>
-<span class="sourceLineNo">331</span><a name="line.331"></a>
-<span class="sourceLineNo">332</span>  public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =<a name="line.332"></a>
-<span class="sourceLineNo">333</span>    "hbase.master.wait.on.service.seconds";<a name="line.333"></a>
-<span class="sourceLineNo">334</span>  public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS = 5 * 60;<a name="line.334"></a>
-<span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>  // Metrics for the HMaster<a name="line.336"></a>
-<span class="sourceLineNo">337</span>  final MetricsMaster metricsMaster;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  // file system manager for the master FS operations<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private MasterFileSystem fileSystemManager;<a name="line.339"></a>
-<span class="sourceLineNo">340</span>  private MasterWalManager walManager;<a name="line.340"></a>
-<span class="sourceLineNo">341</span><a name="line.341"></a>
-<span class="sourceLineNo">342</span>  // server manager to deal with region server info<a name="line.342"></a>
-<span class="sourceLineNo">343</span>  private volatile ServerManager serverManager;<a name="line.343"></a>
-<span class="sourceLineNo">344</span><a name="line.344"></a>
-<span class="sourceLineNo">345</span>  // manager of assignment nodes in zookeeper<a name="line.345"></a>
-<span class="sourceLineNo">346</span>  private AssignmentManager assignmentManager;<a name="line.346"></a>
-<span class="sourceLineNo">347</span><a name="line.347"></a>
-<span class="sourceLineNo">348</span>  // manager of replication<a name="line.348"></a>
-<span class="sourceLineNo">349</span>  private ReplicationPeerManager replicationPeerManager;<a name="line.349"></a>
-<span class="sourceLineNo">350</span><a name="line.350"></a>
-<span class="sourceLineNo">351</span>  private SyncReplicationReplayWALManager syncReplicationReplayWALManager;<a name="line.351"></a>
-<span class="sourceLineNo">352</span><a name="line.352"></a>
-<span class="sourceLineNo">353</span>  // buffer for "fatal error" notices from region servers<a name="line.353"></a>
-<span class="sourceLineNo">354</span>  // in the cluster. This is only used for assisting<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  // operations/debugging.<a name="line.355"></a>
-<span class="sourceLineNo">356</span>  MemoryBoundedLogMessageBuffer rsFatals;<a name="line.356"></a>
-<span class="sourceLineNo">357</span><a name="line.357"></a>
-<span class="sourceLineNo">358</span>  // flag set after we become the active master (used for testing)<a name="line.358"></a>
-<span class="sourceLineNo">359</span>  private volatile boolean activeMaster = false;<a name="line.359"></a>
-<span class="sourceLineNo">360</span><a name="line.360"></a>
-<span class="sourceLineNo">361</span>  // flag set after we complete initialization once active<a name="line.361"></a>
-<span class="sourceLineNo">362</span>  private final ProcedureEvent&lt;?&gt; initialized = new ProcedureEvent&lt;&gt;("master initialized");<a name="line.362"></a>
-<span class="sourceLineNo">363</span><a name="line.363"></a>
-<span class="sourceLineNo">364</span>  // flag set after master services are started,<a name="line.364"></a>
-<span class="sourceLineNo">365</span>  // initialization may have not completed yet.<a name="line.365"></a>
-<span class="sourceLineNo">366</span>  volatile boolean serviceStarted = false;<a name="line.366"></a>
-<span class="sourceLineNo">367</span><a name="line.367"></a>
-<span class="sourceLineNo">368</span>  // Maximum time we should run balancer for<a name="line.368"></a>
-<span class="sourceLineNo">369</span>  private final int maxBlancingTime;<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  // Maximum percent of regions in transition when balancing<a name="line.370"></a>
-<span class="sourceLineNo">371</span>  private final double maxRitPercent;<a name="line.371"></a>
-<span class="sourceLineNo">372</span><a name="line.372"></a>
-<span class="sourceLineNo">373</span>  private final LockManager lockManager = new LockManager(this);<a name="line.373"></a>
-<span class="sourceLineNo">374</span><a name="line.374"></a>
-<span class="sourceLineNo">375</span>  private LoadBalancer balancer;<a name="line.375"></a>
-<span class="sourceLineNo">376</span>  private RegionNormalizer normalizer;<a name="line.376"></a>
-<span class="sourceLineNo">377</span>  private BalancerChore balancerChore;<a name="line.377"></a>
-<span class="sourceLineNo">378</span>  private RegionNormalizerChore normalizerChore;<a name="line.378"></a>
-<span class="sourceLineNo">379</span>  private ClusterStatusChore clusterStatusChore;<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  private ClusterStatusPublisher clusterStatusPublisherChore = null;<a name="line.380"></a>
-<span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  CatalogJanitor catalogJanitorChore;<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  private LogCleaner logCleaner;<a name="line.383"></a>
-<span class="sourceLineNo">384</span>  private HFileCleaner hfileCleaner;<a name="line.384"></a>
-<span class="sourceLineNo">385</span>  private ReplicationBarrierCleaner replicationBarrierCleaner;<a name="line.385"></a>
-<span class="sourceLineNo">386</span>  private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  private MobCompactionChore mobCompactChore;<a name="line.387"></a>
-<span class="sourceLineNo">388</span>  private MasterMobCompactionThread mobCompactThread;<a name="line.388"></a>
-<span class="sourceLineNo">389</span>  // used to synchronize the mobCompactionStates<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  private final IdLock mobCompactionLock = new IdLock();<a name="line.390"></a>
-<span class="sourceLineNo">391</span>  // save the information of mob compactions in tables.<a name="line.391"></a>
-<span class="sourceLineNo">392</span>  // the key is table name, the value is the number of compactions in that table.<a name="line.392"></a>
-<span class="sourceLineNo">393</span>  private Map&lt;TableName, AtomicInteger&gt; mobCompactionStates = Maps.newConcurrentMap();<a name="line.393"></a>
-<span class="sourceLineNo">394</span><a name="line.394"></a>
-<span class="sourceLineNo">395</span>  MasterCoprocessorHost cpHost;<a name="line.395"></a>
-<span class="sourceLineNo">396</span><a name="line.396"></a>
-<span class="sourceLineNo">397</span>  private final boolean preLoadTableDescriptors;<a name="line.397"></a>
-<span class="sourceLineNo">398</span><a name="line.398"></a>
-<span class="sourceLineNo">399</span>  // Time stamps for when a hmaster became active<a name="line.399"></a>
-<span class="sourceLineNo">400</span>  private long masterActiveTime;<a name="line.400"></a>
-<span class="sourceLineNo">401</span><a name="line.401"></a>
-<span class="sourceLineNo">402</span>  // Time stamp for when HMaster finishes becoming Active Master<a name="line.402"></a>
-<span class="sourceLineNo">403</span>  private long masterFinishedInitializationTime;<a name="line.403"></a>
-<span class="sourceLineNo">404</span><a name="line.404"></a>
-<span class="sourceLineNo">405</span>  //should we check the compression codec type at master side, default true, HBASE-6370<a name="line.405"></a>
-<span class="sourceLineNo">406</span>  private final boolean masterCheckCompression;<a name="line.406"></a>
-<span class="sourceLineNo">407</span><a name="line.407"></a>
-<span class="sourceLineNo">408</span>  //should we check encryption settings at master side, default true<a name="line.408"></a>
-<span class="sourceLineNo">409</span>  private final boolean masterCheckEncryption;<a name="line.409"></a>
-<span class="sourceLineNo">410</span><a name="line.410"></a>
-<span class="sourceLineNo">411</span>  Map&lt;String, Service&gt; coprocessorServiceHandlers = Maps.newHashMap();<a name="line.411"></a>
-<span class="sourceLineNo">412</span><a name="line.412"></a>
-<span class="sourceLineNo">413</span>  // monitor for snapshot of hbase tables<a name="line.413"></a>
-<span class="sourceLineNo">414</span>  SnapshotManager snapshotManager;<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  // monitor for distributed procedures<a name="line.415"></a>
-<span class="sourceLineNo">416</span>  private MasterProcedureManagerHost mpmHost;<a name="line.416"></a>
-<span class="sourceLineNo">417</span><a name="line.417"></a>
-<span class="sourceLineNo">418</span>  // it is assigned after 'initialized' guard set to true, so should be volatile<a name="line.418"></a>
-<span class="sourceLineNo">419</span>  private volatile MasterQuotaManager quotaManager;<a name="line.419"></a>
-<span class="sourceLineNo">420</span>  private SpaceQuotaSnapshotNotifier spaceQuotaSnapshotNotifier;<a name="line.420"></a>
-<span class="sourceLineNo">421</span>  private QuotaObserverChore quotaObserverChore;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>  private SnapshotQuotaObserverChore snapshotQuotaChore;<a name="line.422"></a>
-<span class="sourceLineNo">423</span><a name="line.423"></a>
-<span class="sourceLineNo">424</span>  private ProcedureExecutor&lt;MasterProcedureEnv&gt; procedureExecutor;<a name="line.424"></a>
-<span class="sourceLineNo">425</span>  private WALProcedureStore procedureStore;<a name="line.425"></a>
-<span class="sourceLineNo">426</span><a name="line.426"></a>
-<span class="sourceLineNo">427</span>  // handle table states<a name="line.427"></a>
-<span class="sourceLineNo">428</span>  private TableStateManager tableStateManager;<a name="line.428"></a>
-<span class="sourceLineNo">429</span><a name="line.429"></a>
-<span class="sourceLineNo">430</span>  private long splitPlanCount;<a name="line.430"></a>
-<span class="sourceLineNo">431</span>  private long mergePlanCount;<a name="line.431"></a>
-<span class="sourceLineNo">432</span><a name="line.432"></a>
-<span class="sourceLineNo">433</span>  /* Handle favored nodes information */<a name="line.433"></a>
-<span class="sourceLineNo">434</span>  private FavoredNodesManager favoredNodesManager;<a name="line.434"></a>
-<span class="sourceLineNo">435</span><a name="line.435"></a>
-<span class="sourceLineNo">436</span>  /** jetty server for master to redirect requests to regionserver infoServer */<a name="line.436"></a>
-<span class="sourceLineNo">437</span>  private Server masterJettyServer;<a name="line.437"></a>
-<span class="sourceLineNo">438</span><a name="line.438"></a>
-<span class="sourceLineNo">439</span>  public static class RedirectServlet extends HttpServlet {<a name="line.439"></a>
-<span class="sourceLineNo">440</span>    private static final long serialVersionUID = 2894774810058302473L;<a name="line.440"></a>
-<span class="sourceLineNo">441</span>    private final int regionServerInfoPort;<a name="line.441"></a>
-<span class="sourceLineNo">442</span>    private final String regionServerHostname;<a name="line.442"></a>
-<span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>    /**<a name="line.444"></a>
-<span class="sourceLineNo">445</span>     * @param infoServer that we're trying to send all requests to<a name="line.445"></a>
-<span class="sourceLineNo">446</span>     * @param hostname may be null. if given, will be used for redirects instead of host from client.<a name="line.446"></a>
-<span class="sourceLineNo">447</span>     */<a name="line.447"></a>
-<span class="sourceLineNo">448</span>    public RedirectServlet(InfoServer infoServer, String hostname) {<a name="line.448"></a>
-<span class="sourceLineNo">449</span>       regionServerInfoPort = infoServer.getPort();<a name="line.449"></a>
-<span class="sourceLineNo">450</span>       regionServerHostname = hostname;<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    }<a name="line.451"></a>
-<span class="sourceLineNo">452</span><a name="line.452"></a>
-<span class="sourceLineNo">453</span>    @Override<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    public void doGet(HttpServletRequest request,<a name="line.454"></a>
-<span class="sourceLineNo">455</span>        HttpServletResponse response) throws ServletException, IOException {<a name="line.455"></a>
-<span class="sourceLineNo">456</span>      String redirectHost = regionServerHostname;<a name="line.456"></a>
-<span class="sourceLineNo">457</span>      if(redirectHost == null) {<a name="line.457"></a>
-<span class="sourceLineNo">458</span>        redirectHost = request.getServerName();<a name="line.458"></a>
-<span class="sourceLineNo">459</span>        if(!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {<a name="line.459"></a>
-<span class="sourceLineNo">460</span>          LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +<a name="line.460"></a>
-<span class="sourceLineNo">461</span>              MASTER_HOSTNAME_KEY + "' is not set; client will get a HTTP 400 response. If " +<a name="line.461"></a>
-<span class="sourceLineNo">462</span>              "your HBase deployment relies on client accessible names that the region server process " +<a name="line.462"></a>
-<span class="sourceLineNo">463</span>              "can't resolve locally, then you should set the previously mentioned configuration variable " +<a name="line.463"></a>
-<span class="sourceLineNo">464</span>              "to an appropriate hostname.");<a name="line.464"></a>
-<span class="sourceLineNo">465</span>          // no sending client provided input back to the client, so the goal host is just in the logs.<a name="line.465"></a>
-<span class="sourceLineNo">466</span>          response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +<a name="line.466"></a>
-<span class="sourceLineNo">467</span>              "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +<a name="line.467"></a>
-<span class="sourceLineNo">468</span>              "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");<a name="line.468"></a>
-<span class="sourceLineNo">469</span>          return;<a name="line.469"></a>
-<span class="sourceLineNo">470</span>        }<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      }<a name="line.471"></a>
-<span class="sourceLineNo">472</span>      // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      // host and port we're using, but it's buried way too deep to do that ATM.<a name="line.473"></a>
-<span class="sourceLineNo">474</span>      String redirectUrl = request.getScheme() + "://"<a name="line.474"></a>
-<span class="sourceLineNo">475</span>        + redirectHost + ":" + regionServerInfoPort<a name="line.475"></a>
-<span class="sourceLineNo">476</span>        + request.getRequestURI();<a name="line.476"></a>
-<span class="sourceLineNo">477</span>      response.sendRedirect(redirectUrl);<a name="line.477"></a>
-<span class="sourceLineNo">478</span>    }<a name="line.478"></a>
-<span class="sourceLineNo">479</span>  }<a name="line.479"></a>
-<span class="sourceLineNo">480</span><a name="line.480"></a>
-<span class="sourceLineNo">481</span>  /**<a name="line.481"></a>
-<span class="sourceLineNo">482</span>   * Initializes the HMaster. The steps are as follows:<a name="line.482"></a>
-<span class="sourceLineNo">483</span>   * &lt;p&gt;<a name="line.483"></a>
-<span class="sourceLineNo">484</span>   * &lt;ol&gt;<a name="line.484"></a>
-<span class="sourceLineNo">485</span>   * &lt;li&gt;Initialize the local HRegionServer<a name="line.485"></a>
-<span class="sourceLineNo">486</span>   * &lt;li&gt;Start the ActiveMasterManager.<a name="line.486"></a>
-<span class="sourceLineNo">487</span>   * &lt;/ol&gt;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>   * &lt;p&gt;<a name="line.488"></a>
-<span class="sourceLineNo">489</span>   * Remaining steps of initialization occur in<a name="line.489"></a>
-<span class="sourceLineNo">490</span>   * #finishActiveMasterInitialization(MonitoredTask) after<a name="line.490"></a>
-<span class="sourceLineNo">491</span>   * the master becomes the active one.<a name="line.491"></a>
-<span class="sourceLineNo">492</span>   */<a name="line.492"></a>
-<span class="sourceLineNo">493</span>  public HMaster(final Configuration conf)<a name="line.493"></a>
-<span class="sourceLineNo">494</span>      throws IOException, KeeperException {<a name="line.494"></a>
-<span class="sourceLineNo">495</span>    super(conf);<a name="line.495"></a>
-<span class="sourceLineNo">496</span>    TraceUtil.initTracer(conf);<a name="line.496"></a>
-<span class="sourceLineNo">497</span>    try {<a name="line.497"></a>
-<span class="sourceLineNo">498</span>      this.rsFatals = new MemoryBoundedLogMessageBuffer(<a name="line.498"></a>
-<span class="sourceLineNo">499</span>          conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));<a name="line.499"></a>
-<span class="sourceLineNo">500</span>      LOG.info("hbase.rootdir=" + getRootDir() +<a name="line.500"></a>
-<span class="sourceLineNo">501</span>          ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));<a name="line.501"></a>
-<span class="sourceLineNo">502</span><a name="line.502"></a>
-<span class="sourceLineNo">503</span>      // Disable usage of meta replicas in the master<a name="line.503"></a>
-<span class="sourceLineNo">504</span>      this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>      decorateMasterConfiguration(this.conf);<a name="line.506"></a>
-<span class="sourceLineNo">507</span><a name="line.507"></a>
-<span class="sourceLineNo">508</span>      // Hack! Maps DFSClient =&gt; Master for logs.  HDFS made this<a name="line.508"></a>
-<span class="sourceLineNo">509</span>      // config param for task trackers, but we can piggyback off of it.<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      if (this.conf.get("mapreduce.task.attempt.id") == null) {<a name="line.510"></a>
-<span class="sourceLineNo">511</span>        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      }<a name="line.512"></a>
-<span class="sourceLineNo">513</span><a name="line.513"></a>
-<span class="sourceLineNo">514</span>      // should we check the compression codec type at master side, default true, HBASE-6370<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);<a name="line.515"></a>
-<span class="sourceLineNo">516</span><a name="line.516"></a>
-<span class="sourceLineNo">517</span>      // should we check encryption settings at master side, default true<a name="line.517"></a>
-<span class="sourceLineNo">518</span>      this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);<a name="line.518"></a>
-<span class="sourceLineNo">519</span><a name="line.519"></a>
-<span class="sourceLineNo">520</span>      this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));<a name="line.520"></a>
-<span class="sourceLineNo">521</span><a name="line.521"></a>
-<span class="sourceLineNo">522</span>      // preload table descriptor at startup<a name="line.522"></a>
-<span class="sourceLineNo">523</span>      this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);<a name="line.523"></a>
-<span class="sourceLineNo">524</span><a name="line.524"></a>
-<span class="sourceLineNo">525</span>      this.maxBlancingTime = getMaxBalancingTime();<a name="line.525"></a>
-<span class="sourceLineNo">526</span>      this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,<a name="line.526"></a>
-<span class="sourceLineNo">527</span>          HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);<a name="line.527"></a>
-<span class="sourceLineNo">528</span><a name="line.528"></a>
-<span class="sourceLineNo">529</span>      // Do we publish the status?<a name="line.529"></a>
-<span class="sourceLineNo">530</span><a name="line.530"></a>
-<span class="sourceLineNo">531</span>      boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,<a name="line.531"></a>
-<span class="sourceLineNo">532</span>          HConstants.STATUS_PUBLISHED_DEFAULT);<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      Class&lt;? extends ClusterStatusPublisher.Publisher&gt; publisherClass =<a name="line.533"></a>
-<span class="sourceLineNo">534</span>          conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,<a name="line.534"></a>
-<span class="sourceLineNo">535</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,<a name="line.535"></a>
-<span class="sourceLineNo">536</span>              ClusterStatusPublisher.Publisher.class);<a name="line.536"></a>
-<span class="sourceLineNo">537</span><a name="line.537"></a>
-<span class="sourceLineNo">538</span>      if (shouldPublish) {<a name="line.538"></a>
-<span class="sourceLineNo">539</span>        if (publisherClass == null) {<a name="line.539"></a>
-<span class="sourceLineNo">540</span>          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +<a name="line.540"></a>
-<span class="sourceLineNo">541</span>              ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +<a name="line.541"></a>
-<span class="sourceLineNo">542</span>              " is not set - not publishing status");<a name="line.542"></a>
-<span class="sourceLineNo">543</span>        } else {<a name="line.543"></a>
-<span class="sourceLineNo">544</span>          clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>          getChoreService().scheduleChore(clusterStatusPublisherChore);<a name="line.545"></a>
-<span class="sourceLineNo">546</span>        }<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      }<a name="line.547"></a>
-<span class="sourceLineNo">548</span><a name="line.548"></a>
-<span class="sourceLineNo">549</span>      // Some unit tests don't need a cluster, so no zookeeper at all<a name="line.549"></a>
-<span class="sourceLineNo">550</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.550"></a>
-<span class="sourceLineNo">551</span>        this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);<a name="line.551"></a>
-<span class="sourceLineNo">552</span>      } else {<a name="line.552"></a>
-<span class="sourceLineNo">553</span>        this.activeMasterManager = null;<a name="line.553"></a>
-<span class="sourceLineNo">554</span>      }<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    } catch (Throwable t) {<a name="line.555"></a>
-<span class="sourceLineNo">556</span>      // Make sure we log the exception. HMaster is often started via reflection and the<a name="line.556"></a>
-<span class="sourceLineNo">557</span>      // cause of failed startup is lost.<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      LOG.error("Failed construction of Master", t);<a name="line.558"></a>
-<span class="sourceLineNo">559</span>      throw t;<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>  }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>  @Override<a name="line.563"></a>
-<span class="sourceLineNo">564</span>  protected String getUseThisHostnameInstead(Configuration conf) {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    return conf.get(MASTER_HOSTNAME_KEY);<a name="line.565"></a>
-<span class="sourceLineNo">566</span>  }<a name="line.566"></a>
-<span class="sourceLineNo">567</span><a name="line.567"></a>
-<span class="sourceLineNo">568</span>  // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will<a name="line.568"></a>
-<span class="sourceLineNo">569</span>  // block in here until then.<a name="line.569"></a>
-<span class="sourceLineNo">570</span>  @Override<a name="line.570"></a>
-<span class="sourceLineNo">571</span>  public void run() {<a name="line.571"></a>
-<span class="sourceLineNo">572</span>    try {<a name="line.572"></a>
-<span class="sourceLineNo">573</span>      if (!conf.getBoolean("hbase.testing.nocluster", false)) {<a name="line.573"></a>
-<span class="sourceLineNo">574</span>        Threads.setDaemonThreadRunning(new Thread(() -&gt; {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>          try {<a name="line.575"></a>
-<span class="sourceLineNo">576</span>            int infoPort = putUpJettyServer();<a name="line.576"></a>
-<span class="sourceLineNo">577</span>            startActiveMasterManager(infoPort);<a name="line.577"></a>
-<span class="sourceLineNo">578</span>          } catch (Throwable t) {<a name="line.578"></a>
-<span class="sourceLineNo">579</span>            // Make sure we log the exception.<a name="line.579"></a>
-<span class="sourceLineNo">580</span>            String error = "Failed to become Active Master";<a name="line.580"></a>
-<span class="sourceLineNo">581</span>            LOG.error(error, t);<a name="line.581"></a>
-<span class="sourceLineNo">582</span>            // Abort should have been called already.<a name="line.582"></a>
-<span class="sourceLineNo">583</span>            if (!isAborted()) {<a name="line.583"></a>
-<span class="sourceLineNo">584</span>              abort(error, t);<a name="line.584"></a>
-<span class="sourceLineNo">585</span>            }<a name="line.585"></a>
-<span class="sourceLineNo">586</span>          }<a name="line.586"></a>
-<span class="sourceLineNo">587</span>        }));<a name="line.587"></a>
-<span class="sourceLineNo">588</span>      }<a name="line.588"></a>
-<span class="sourceLineNo">589</span>      // Fall in here even if we have been aborted. Need to run the shutdown services and<a name="line.589"></a>
-<span class="sourceLineNo">590</span>      // the super run call will do this for us.<a name="line.590"></a>
-<span class="sourceLineNo">591</span>      super.run();<a name="line.591"></a>
-<span class="sourceLineNo">592</span>    } finally {<a name="line.592"></a>
-<span class="sourceLineNo">593</span>      if (this.clusterSchemaService != null) {<a name="line.593"></a>
-<span class="sourceLineNo">594</span>        // If on way out, then we are no longer active master.<a name="line.594"></a>
-<span class="sourceLineNo">595</span>        this.clusterSchemaService.stopAsync();<a name="line.595"></a>
-<span class="sourceLineNo">596</span>        try {<a name="line.596"></a>
-<span class="sourceLineNo">597</span>          this.clusterSchemaService.awaitTerminated(<a name="line.597"></a>
-<span class="sourceLineNo">598</span>              getConfiguration().getInt(HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,<a name="line.598"></a>
-<span class="sourceLineNo">599</span>              DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);<a name="line.599"></a>
-<span class="sourceLineNo">600</span>        } catch (TimeoutException te) {<a name="line.600"></a>
-<span class="sourceLineNo">601</span>          LOG.warn("Failed shutdown of clusterSchemaService", te);<a name="line.601"></a>
-<span class="sourceLineNo">602</span>        }<a name="line.602"></a>
-<span class="sourceLineNo">603</span>      }<a name="line.603"></a>
-<span class="sourceLineNo">604</span>      this.activeMaster = false;<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    }<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  }<a name="line.606"></a>
-<span class="sourceLineNo">607</span><a name="line.607"></a>
-<span class="sourceLineNo">608</span>  // return the actual infoPort, -1 means disable info server.<a name="line.608"></a>
-<span class="sourceLineNo">609</span>  private int putUpJettyServer() throws IOException {<a name="line.609"></a>
-<span class="sourceLineNo">610</span>    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {<a name="line.610"></a>
-<span class="sourceLineNo">611</span>      return -1;<a name="line.611"></a>
-<span class="sourceLineNo">612</span>    }<a name="line.612"></a>
-<span class="sourceLineNo">613</span>    final int infoPort = conf.getInt("hbase.master.info.port.orig",<a name="line.613"></a>
-<span class="sourceLineNo">614</span>      HConstants.DEFAULT_MASTER_INFOPORT);<a name="line.614"></a>
-<span class="sourceLineNo">615</span>    // -1 is for disabling info server, so no redirecting<a name="line.615"></a>
-<span class="sourceLineNo">616</span>    if (infoPort &lt; 0 || infoServer == null) {<a name="line.616"></a>
-<span class="sourceLineNo">617</span>      return -1;<a name="line.617"></a>
-<span class="sourceLineNo">618</span>    }<a name="line.618"></a>
-<span class="sourceLineNo">619</span>    if(infoPort == infoServer.getPort()) {<a name="line.619"></a>
-<span class="sourceLineNo">620</span>      return infoPort;<a name="line.620"></a>
-<span class="sourceLineNo">621</span>    }<a name="line.621"></a>
-<span class="sourceLineNo">622</span>    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");<a name="line.622"></a>
-<span class="sourceLineNo">623</span>    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {<a name="line.623"></a>
-<span class="sourceLineNo">624</span>      String msg =<a name="line.624"></a>
-<span class="sourceLineNo">625</span>          "Failed to start redirecting jetty server. Address " + addr<a name="line.625"></a>
-<span class="sourceLineNo">626</span>              + " does not belong to this host. Correct configuration parameter: "<a name="line.626"></a>
-<span class="sourceLineNo">627</span>              + "hbase.master.info.bindAddress";<a name="line.627"></a>
-<span class="sourceLineNo">628</span>      LOG.error(msg);<a name="line.628"></a>
-<span class="sourceLineNo">629</span>      throw new IOException(msg);<a name="line.629"></a>
-<span class="sourceLineNo">630</span>    }<a name="line.630"></a>
-<span class="sourceLineNo">631</span><a name="line.631"></a>
-<span class="sourceLineNo">632</span>    // TODO I'm pretty sure we could just add another binding to the InfoServer run by<a name="line.632"></a>
-<span class="sourceLineNo">633</span>    // the RegionServer and have it run the RedirectServlet instead of standing up<a name="line.633"></a>
-<span class="sourceLineNo">634</span>    // a second entire stack here.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>    masterJettyServer = new Server();<a name="line.635"></a>
-<span class="sourceLineNo">636</span>    final ServerConnector connector = new ServerConnector(masterJettyServer);<a name="line.636"></a>
-<span class="sourceLineNo">637</span>    connector.setHost(addr);<a name="line.637"></a>
-<span class="sourceLineNo">638</span>    connector.setPort(infoPort);<a name="line.638"></a>
-<span class="sourceLineNo">639</span>    masterJettyServer.addConnector(connector);<a name="line.639"></a>
-<span class="sourceLineNo">640</span>    masterJettyServer.setStopAtShutdown(true);<a name="line.640"></a>
-<span class="sourceLineNo">641</span><a name="line.641"></a>
-<span class="sourceLineNo">642</span>    final String redirectHostname =<a name="line.642"></a>
-<span class="sourceLineNo">643</span>        StringUtils.isBlank(useThisHostnameInstead) ? null : useThisHostnameInstead;<a name="line.643"></a>
-<span class="sourceLineNo">644</span><a name="line.644"></a>
-<span class="sourceLineNo">645</span>    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);<a name="line.645"></a>
-<span class="sourceLineNo">646</span>    final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);<a name="line.646"></a>
-<span class="sourceLineNo">647</span>    context.addServlet(new ServletHolder(redirect), "/*");<a name="line.647"></a>
-<span class="sourceLineNo">648</span>    context.setServer(masterJettyServer);<a name="line.648"></a>
-<span class="sourceLineNo">649</span><a name="line.649"></a>
-<span class="sourceLineNo">650</span>    try {<a name="line.650"></a>
-<span class="sourceLineNo">651</span>      masterJettyServer.start();<a name="line.651"></a>
-<span class="sourceLineNo">652</span>    } catch (Exception e) {<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      throw new IOException("Failed to start redirecting jetty server", e);<a name="line.653"></a>
-<span class="sourceLineNo">654</span>    }<a name="line.654"></a>
-<span class="sourceLineNo">655</span>    return connector.getLocalPort();<a name="line.655"></a>
-<span class="sourceLineNo">656</span>  }<a name="line.656"></a>
-<span class="sourceLineNo">657</span><a name="line.657"></a>
-<span class="sourceLineNo">658</span>  @Override<a name="line.658"></a>
-<span class="sourceLineNo">659</span>  protected Function&lt;TableDescriptorBuilder, TableDescriptorBuilder&gt; getMetaTableObserver() {<a name="line.659"></a>
-<span class="sourceLineNo">660</span>    return builder -&gt; builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));<a name="line.660"></a>
-<span class="sourceLineNo">661</span>  }<a name="line.661"></a>
-<span class="sourceLineNo">662</span>  /**<a name="line.662"></a>
-<span class="sourceLineNo">663</span>   * For compatibility, if failed with regionserver credentials, try the master one<a name="line.663"></a>
-<span class="sourceLineNo">664</span>   */<a name="line.664"></a>
-<span class="sourceLineNo">665</span>  @Override<a name="line.665"></a>
-<span class="sourceLineNo">666</span>  protected void login(UserProvider user, String host) throws IOException {<a name="line.666"></a>
-<span class="sourceLineNo">667</span>    try {<a name="line.667"></a>
-<span class="sourceLineNo">668</span>      super.login(user, host);<a name="line.668"></a>
-<span class="sourceLineNo">669</span>    } catch (IOException ie) {<a name="line.669"></a>
-<span class="sourceLineNo">670</span>      user.login("hbase.master.keytab.file",<a name="line.670"></a>
-<span class="sourceLineNo">671</span>        "hbase.master.kerberos.principal", host);<a name="line.671"></a>
-<span class="sourceLineNo">672</span>    }<a name="line.672"></a>
-<span class="sourceLineNo">673</span>  }<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>  /**<a name="line.675"></a>
-<span class="sourceLineNo">676</span>   * If configured to put regions on active master,<a name="line.676"></a>
-<span class="sourceLineNo">677</span>   * wait till a backup master becomes active.<a name="line.677"></a>
-<span class="sourceLineNo">678</span>   * Otherwise, loop till the server is stopped or aborted.<a name="line.678"></a>
-<span class="sourceLineNo">679</span>   */<a name="line.679"></a>
-<span class="sourceLineNo">680</span>  @Override<a name="line.680"></a>
-<span class="sourceLineNo">681</span>  protected void waitForMasterActive(){<a name="line.681"></a>
-<span class="sourceLineNo">682</span>    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);<a name="line.682"></a>
-<span class="sourceLineNo">683</span>    while (!(tablesOnMaster &amp;&amp; activeMaster) &amp;&amp; !isStopped() &amp;&amp; !isAborted()) {<a name="line.683"></a>
-<span class="sourceLineNo">684</span>      sleeper.sleep();<a name="line.684"></a>
-<span class="sourceLineNo">685</span>    }<a name="line.685"></a>
-<span class="sourceLineNo">686</span>  }<a name="line.686"></a>
-<span class="sourceLineNo">687</span><a name="line.687"></a>
-<span class="sourceLineNo">688</span>  @VisibleForTesting<a name="line.688"></a>
-<span class="sourceLineNo">689</span>  public MasterRpcServices getMasterRpcServices() {<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    return (MasterRpcServices)rpcServices;<a name="line.690"></a>
-<span class="sourceLineNo">691</span>  }<a name="line.691"></a>
-<span class="sourceLineNo">692</span><a name="line.692"></a>
-<span class="sourceLineNo">693</span>  public boolean balanceSwitch(final boolean b) throws IOException {<a name="line.693"></a>
-<span class="sourceLineNo">694</span>    return getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);<a name="line.694"></a>
-<span class="sourceLineNo">695</span>  }<a name="line.695"></a>
-<span class="sourceLineNo">696</span><a name="line.696"></a>
-<span class="sourceLineNo">697</span>  @Override<a name="line.697"></a>
-<span class="sourceLineNo">698</span>  protected String getProcessName() {<a name="line.698"></a>
-<span class="sourceLineNo">699</span>    return MASTER;<a name="line.699"></a>
-<span class="sourceLineNo">700</span>  }<a name="line.700"></a>
-<span class="sourceLineNo">701</span><a name="line.701"></a>
-<span class="sourceLineNo">702</span>  @Override<a name="line.702"></a>
-<span class="sourceLineNo">703</span>  protected boolean canCreateBaseZNode() {<a name="line.703"></a>
-<span class="sourceLineNo">704</span>    return true;<a name="line.704"></a>
-<span class="sourceLineNo">705</span>  }<a name="line.705"></a>
-<span class="sourceLineNo">706</span><a name="line.706"></a>
-<span class="sourceLineNo">707</span>  @Override<a name="line.707"></a>
-<span class="sourceLineNo">708</span>  protected boolean canUpdateTableDescriptor() {<a name="line.708"></a>
-<span class="sourceLineNo">709</span>    return true;<a name="line.709"></a>
-<span class="sourceLineNo">710</span>  }<a name="line.710"></a>
-<span class="sourceLineNo">711</span><a name="line.711"></a>
-<span class="sourceLineNo">712</span>  @Override<a name="line.712"></a>
-<span class="sourceLineNo">713</span>  protected RSRpcServices createRpcServices() throws IOException {<a name="line.713"></a>
-<span class="sourceLineNo">714</span>    return new MasterRpcServices(this);<a name="line.714"></a>
-<span class="sourceLineNo">715</span>  }<a name="line.715"></a>
-<span class="sourceLineNo">716</span><a name="line.716"></a>
-<span class="sourceLineNo">717</span>  @Override<a name="line.717"></a>
-<span class="sourceLineNo">718</span>  protected void configureInfoServer() {<a name="line.718"></a>
-<span class="sourceLineNo">719</span>    infoServer.addServlet("master-status", "/master-status", MasterStatusServlet.class);<a name="line.719"></a>
-<span class="sourceLineNo">720</span>    infoServer.setAttribute(MASTER, this);<a name="line.720"></a>
-<span class="sourceLineNo">721</span>    if (LoadBalancer.isTablesOnMaster(conf)) {<a name="line.721"></a>
-<span class="sourceLineNo">722</span>      super.configureInfoServer();<a name="line.722"></a>
-<span class="sourceLineNo">723</span>    }<a name="line.723"></a>
-<span class="sourceLineNo">724</span>  }<a name="line.724"></a>
-<span class="sourceLineNo">725</span><a name="line.725"></a>
-<span class="sourceLineNo">726</span>  @Override<a name="line.726"></a>
-<span class="sourceLineNo">727</span>  protected Class&lt;? extends HttpServlet&gt; getDumpServlet() {<a name="line.727"></a>
-<span class="sourceLineNo">728</span>    return MasterDumpServlet.class;<a name="line.728"></a>
-<span class="sourceLineNo">729</span>  }<a name="line.729"></a>
-<span class="sourceLineNo">730</span><a name="line.730"></a>
-<span class="sourceLineNo">731</span>  @Override<a name="line.731"></a>
-<span class="sourceLineNo">732</span>  public MetricsMaster getMasterMetrics() {<a name="line.732"></a>
-<span class="sourceLineNo">733</span>    return metricsMaster;<a name="line.733"></a>
-<span class="sourceLineNo">734</span>  }<a name="line.734"></a>
-<span class="sourceLineNo">735</span><a name="line.735"></a>
-<span class="sourceLineNo">736</span>  /**<a name="line.736"></a>
-<span class="sourceLineNo">737</span>   * &lt;p&gt;<a name="line.737"></a>
-<span class="sourceLineNo">738</span>   * Initialize all ZK based system trackers. But do not include {@link RegionServerTracker}, it<a name="line.738"></a>
-<span class="sourceLineNo">739</span>   * should have already been initialized along with {@link ServerManager}.<a name="line.739"></a>
-<span class="sourceLineNo">740</span>   * &lt;/p&gt;<a name="line.740"></a>
-<span class="sourceLineNo">741</span>   * &lt;p&gt;<a name="line.741"></a>
-<span class="sourceLineNo">742</span>   * Will be overridden in tests.<a name="line.742"></a>
-<span class="sourceLineNo">743</span>   * &lt;/p&gt;<a name="line.743"></a>
-<span class="sourceLineNo">744</span>   */<a name="line.744"></a>
-<span class="sourceLineNo">745</span>  @VisibleForTesting<a name="line.745"></a>
-<span class="sourceLineNo">746</span>  protected void initializeZKBasedSystemTrackers()<a name="line.746"></a>
-<span class="sourceLineNo">747</span>      throws IOException, InterruptedException, KeeperException, ReplicationException {<a name="line.747"></a>
-<span class="sourceLineNo">748</span>    this.balancer = LoadBalancerFactory.getLoadBalancer(conf);<a name="line.748"></a>
-<span class="sourceLineNo">749</span>    this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);<a name="line.749"></a>
-<span class="sourceLineNo">750</span>    this.normalizer.setMasterServices(this);<a name="line.750"></a>
-<span class="sourceLineNo">751</span>    this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);<a name="line.751"></a>
-<span class="sourceLineNo">752</span>    this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);<a name="line.752"></a>
-<span class="sourceLineNo">753</span>    this.loadBalancerTracker.start();<a name="line.753"></a>
-<span class="sourceLineNo">754</span><a name="line.754"></a>
-<span class="sourceLineNo">755</span>    this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    this.regionNormalizerTracker.start();<a name="line.756"></a>
-<span class="sourceLineNo">757</span><a name="line.757"></a>
-<span class="sourceLineNo">758</span>    this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this);<a name="line.758"></a>
-<span class="sourceLineNo">759</span>    this.splitOrMergeTracker.start();<a name="line.759"></a>
-<span class="sourceLineNo">760</span><a name="line.760"></a>
-<span class="sourceLineNo">761</span>    this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);<a name="line.761"></a>
-<span class="sourceLineNo">762</span>    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);<a name="line.762"></a>
-<span class="sourceLineNo">763</span><a name="line.763"></a>
-<span class="sourceLineNo">764</span>    this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);<a name="line.764"></a>
-<span class="sourceLineNo">765</span>    this.drainingServerTracker.start();<a name="line.765"></a>
-<span class="sourceLineNo">766</span><a name="line.766"></a>
-<span class="sourceLineNo">767</span>    this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);<a name="line.767"></a>
-<span class="sourceLineNo">768</span>    this.maintenanceModeTracker.start();<a name="line.768"></a>
-<span class="sourceLineNo">769</span><a name="line.769"></a>
-<span class="sourceLineNo">770</span>    String clientQuorumServers = conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM);<a name="line.770"></a>
-<span class="sourceLineNo">771</span>    boolean clientZkObserverMode = conf.getBoolean(HConstants.CLIENT_ZOOKEEPER_OBSERVER_MODE,<a name="line.771"></a>
-<span class="sourceLineNo">772</span>      HConstants.DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE);<a name="line.772"></a>
-<span class="sourceLineNo">773</span>    if (clientQuorumServers != null &amp;&amp; !clientZkObserverMode) {<a name="line.773"></a>
-<span class="sourceLineNo">774</span>      // we need to take care of the ZK information synchronization<a name="line.774"></a>
-<span class="sourceLineNo">775</span>      // if given client ZK are not observer nodes<a name="line.775"></a>
-<span class="sourceLineNo">776</span>      ZKWatcher clientZkWatcher = new ZKWatcher(conf,<a name="line.776"></a>
-<span class="sourceLineNo">777</span>          getProcessName() + ":" + rpcServices.getSocketAddress().getPort() + "-clientZK", this,<a name="line.777"></a>
-<span class="sourceLineNo">778</span>          false, true);<a name="line.778"></a>
-<span class="sourceLineNo">779</span>      this.metaLocationSyncer = new MetaLocationSyncer(zooKeeper, clientZkWatcher, this);<a name="line.779"></a>
-<span class="sourceLineNo">780</span>      this.metaLocationSyncer.start();<a name="line.780"></a>
-<span class="sourceLineNo">781</span>      this.masterAddressSyncer = new MasterAddressSyncer(zooKeeper, clientZkWatcher, this);<a name="line.781"></a>
-<span class="sourceLineNo">782</span>      this.masterAddressSyncer.start();<a name="line.782"></a>
-<span class="sourceLineNo">783</span>      // set cluster id is a one-go effort<a name="line.783"></a>
-<span class="sourceLineNo">784</span>      ZKClusterId.setClusterId(clientZkWatcher, fileSystemManager.getClusterId());<a name="line.784"></a>
-<span class="sourceLineNo">785</span>    }<a name="line.785"></a>
-<span class="sourceLineNo">786</span><a name="line.786"></a>
-<span class="sourceLineNo">787</span>    // Set the cluster as up.  If new RSs, they'll be waiting on this before<a name="line.787"></a>
-<span class="sourceLineNo">788</span>    // going ahead with their startup.<a name="line.788"></a>
-<span class="sourceLineNo">789</span>    boolean wasUp = this.clusterStatusTracker.isClusterUp();<a name="line.789"></a>
-<span class="sourceLineNo">790</span>    if (!wasUp) this.clusterStatusTracker.setClusterUp();<a name="line.790"></a>
-<span class="sourceLineNo">791</span><a name="line.791"></a>
-<span class="sourceLineNo">792</span>    LOG.info("Active/primary master=" + this.serverName +<a name="line.792"></a>
-<span class="sourceLineNo">793</span>        ", sessionid=0x" +<a name="line.793"></a>
-<span class="sourceLineNo">794</span>        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +<a name="line.794"></a>
-<span class="sourceLineNo">795</span>        ", setting cluster-up flag (Was=" + wasUp + ")");<a name="line.795"></a>
-<span class="sourceLineNo">796</span><a name="line.796"></a>
-<span class="sourceLineNo">797</span>    // create/initialize the snapshot manager and other procedure managers<a name="line.797"></a>
-<span class="sourceLineNo">798</span>    this.snapshotManager = new SnapshotManager();<a name="line.798"></a>
-<span class="sourceLineNo">799</span>    this.mpmHost = new MasterProcedureManagerHost();<a name="line.799"></a>
-<span class="sourceLineNo">800</span>    this.mpmHost.register(this.snapshotManager);<a name="line.800"></a>
-<span class="sourceLineNo">801</span>    this.mpmHost.register(new MasterFlushTableProcedureManager());<a name="line.801"></a>
-<span class="sourceLineNo">802</span>    this.mpmHost.loadProcedures(conf);<a name="line.802"></a>
-<span class="sourceLineNo">803</span>    this.mpmHost.initialize(this, this.metricsMaster);<a name="line.803"></a>
-<span class="sourceLineNo">804</span>  }<a name="line.804"></a>
-<span class="sourceLineNo">805</span><a name="line.805"></a>
-<span class="sourceLineNo">806</span>  private static final ImmutableSet&lt;Class&lt;?&gt;&gt; UNSUPPORTED_PROCEDURES =<a name="line.806"></a>
-<span class="sourceLineNo">807</span>    ImmutableSet.of(RecoverMetaProcedure.class, AssignProcedure.class, UnassignProcedure.class,<a name="line.807"></a>
-<span class="sourceLineNo">808</span>      MoveRegionProcedure.class);<a name="line.808"></a>
-<span class="sourceLineNo">809</span><a name="line.809"></a>
-<span class="sourceLineNo">810</span>  /**<a name="line.810"></a>
-<span class="sourceLineNo">811</span>   * In HBASE-20811, we have introduced a new TRSP to assign/unassign/move regions, and it is<a name="line.811"></a>
-<span class="sourceLineNo">812</span>   * incompatible with the old AssignProcedure/UnassignProcedure/MoveRegionProcedure. So we need to<a name="line.812"></a>
-<span class="sourceLineNo">813</span>   * make sure that there are none these procedures when upgrading. If there are, the master will<a name="line.813"></a>
-<span class="sourceLineNo">814</span>   * quit, you need to go back to the old version to finish these procedures first before upgrading.<a name="line.814"></a>
-<span class="sourceLineNo">815</span>   */<a name="line.815"></a>
-<span class="sourceLineNo">816</span>  private void checkUnsupportedProcedure(<a name="line.816"></a>
-<span class="sourceLineNo">817</span>      Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType)<a name="line.817"></a>
-<span class="sourceLineNo">818</span>      throws HBaseIOException {<a name="line.818"></a>
-<span class="sourceLineNo">819</span>    // Confirm that we do not have unfinished assign/unassign related procedures. It is not easy to<a name="line.819"></a>
-<span class="sourceLineNo">820</span>    // support both the old assign/unassign procedures and the new TransitRegionStateProcedure as<a name="line.820"></a>
-<span class="sourceLineNo">821</span>    // there will be conflict in the code for AM. We should finish all these procedures before<a name="line.821"></a>
-<span class="sourceLineNo">822</span>    // upgrading.<a name="line.822"></a>
-<span class="sourceLineNo">823</span>    for (Class&lt;?&gt; clazz : UNSUPPORTED_PROCEDURES) {<a name="line.823"></a>
-<span class="sourceLineNo">824</span>      List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt; procs = procsByType.get(clazz);<a name="line.824"></a>
-<span class="sourceLineNo">825</span>      if (procs != null) {<a name="line.825"></a>
-<span class="sourceLineNo">826</span>        LOG.error(<a name="line.826"></a>
-<span class="sourceLineNo">827</span>          "Unsupported procedure type {} found, please rollback your master to the old" +<a name="line.827"></a>
-<span class="sourceLineNo">828</span>            " version to finish them, and then try to upgrade again. The full procedure list: {}",<a name="line.828"></a>
-<span class="sourceLineNo">829</span>          clazz, procs);<a name="line.829"></a>
-<span class="sourceLineNo">830</span>        throw new HBaseIOException("Unsupported procedure type " + clazz + " found");<a name="line.830"></a>
-<span class="sourceLineNo">831</span>      }<a name="line.831"></a>
-<span class="sourceLineNo">832</span>    }<a name="line.832"></a>
-<span class="sourceLineNo">833</span>    // A special check for SCP, as we do not support RecoverMetaProcedure any more so we need to<a name="line.833"></a>
-<span class="sourceLineNo">834</span>    // make sure that no one will try to schedule it but SCP does have a state which will schedule<a name="line.834"></a>
-<span class="sourceLineNo">835</span>    // it.<a name="line.835"></a>
-<span class="sourceLineNo">836</span>    if (procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.836"></a>
-<span class="sourceLineNo">837</span>      .map(p -&gt; (ServerCrashProcedure) p).anyMatch(ServerCrashProcedure::isInRecoverMetaState)) {<a name="line.837"></a>
-<span class="sourceLineNo">838</span>      LOG.error("At least one ServerCrashProcedure is going to schedule a RecoverMetaProcedure," +<a name="line.838"></a>
-<span class="sourceLineNo">839</span>        " which is not supported any more. Please rollback your master to the old version to" +<a name="line.839"></a>
-<span class="sourceLineNo">840</span>        " finish them, and then try to upgrade again.");<a name="line.840"></a>
-<span class="sourceLineNo">841</span>      throw new HBaseIOException("Unsupported procedure state found for ServerCrashProcedure");<a name="line.841"></a>
-<span class="sourceLineNo">842</span>    }<a name="line.842"></a>
-<span class="sourceLineNo">843</span>  }<a name="line.843"></a>
-<span class="sourceLineNo">844</span><a name="line.844"></a>
-<span class="sourceLineNo">845</span>  /**<a name="line.845"></a>
-<span class="sourceLineNo">846</span>   * Finish initialization of HMaster after becoming the primary master.<a name="line.846"></a>
-<span class="sourceLineNo">847</span>   * &lt;p/&gt;<a name="line.847"></a>
-<span class="sourceLineNo">848</span>   * The startup order is a bit complicated but very important, do not change it unless you know<a name="line.848"></a>
-<span class="sourceLineNo">849</span>   * what you are doing.<a name="line.849"></a>
-<span class="sourceLineNo">850</span>   * &lt;ol&gt;<a name="line.850"></a>
-<span class="sourceLineNo">851</span>   * &lt;li&gt;Initialize file system based components - file system manager, wal manager, table<a name="line.851"></a>
-<span class="sourceLineNo">852</span>   * descriptors, etc&lt;/li&gt;<a name="line.852"></a>
-<span class="sourceLineNo">853</span>   * &lt;li&gt;Publish cluster id&lt;/li&gt;<a name="line.853"></a>
-<span class="sourceLineNo">854</span>   * &lt;li&gt;Here comes the most complicated part - initialize server manager, assignment manager and<a name="line.854"></a>
-<span class="sourceLineNo">855</span>   * region server tracker<a name="line.855"></a>
-<span class="sourceLineNo">856</span>   * &lt;ol type='i'&gt;<a name="line.856"></a>
-<span class="sourceLineNo">857</span>   * &lt;li&gt;Create server manager&lt;/li&gt;<a name="line.857"></a>
-<span class="sourceLineNo">858</span>   * &lt;li&gt;Create procedure executor, load the procedures, but do not start workers. We will start it<a name="line.858"></a>
-<span class="sourceLineNo">859</span>   * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same<a name="line.859"></a>
-<span class="sourceLineNo">860</span>   * server&lt;/li&gt;<a name="line.860"></a>
-<span class="sourceLineNo">861</span>   * &lt;li&gt;Create assignment manager and start it, load the meta region state, but do not load data<a name="line.861"></a>
-<span class="sourceLineNo">862</span>   * from meta region&lt;/li&gt;<a name="line.862"></a>
-<span class="sourceLineNo">863</span>   * &lt;li&gt;Start region server tracker, construct the online servers set and find out dead servers and<a name="line.863"></a>
-<span class="sourceLineNo">864</span>   * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also<a name="line.864"></a>
-<span class="sourceLineNo">865</span>   * scan the wal directory to find out possible live region servers, and the differences between<a name="line.865"></a>
-<span class="sourceLineNo">866</span>   * these two sets are the dead servers&lt;/li&gt;<a name="line.866"></a>
-<span class="sourceLineNo">867</span>   * &lt;/ol&gt;<a name="line.867"></a>
-<span class="sourceLineNo">868</span>   * &lt;/li&gt;<a name="line.868"></a>
-<span class="sourceLineNo">869</span>   * &lt;li&gt;If this is a new deploy, schedule a InitMetaProcedure to initialize meta&lt;/li&gt;<a name="line.869"></a>
-<span class="sourceLineNo">870</span>   * &lt;li&gt;Start necessary service threads - balancer, catalog janior, executor services, and also the<a name="line.870"></a>
-<span class="sourceLineNo">871</span>   * procedure executor, etc. Notice that the balancer must be created first as assignment manager<a name="line.871"></a>
-<span class="sourceLineNo">872</span>   * may use it when assigning regions.&lt;/li&gt;<a name="line.872"></a>
-<span class="sourceLineNo">873</span>   * &lt;li&gt;Wait for meta to be initialized if necesssary, start table state manager.&lt;/li&gt;<a name="line.873"></a>
-<span class="sourceLineNo">874</span>   * &lt;li&gt;Wait for enough region servers to check-in&lt;/li&gt;<a name="line.874"></a>
-<span class="sourceLineNo">875</span>   * &lt;li&gt;Let assignment manager load data from meta and construct region states&lt;/li&gt;<a name="line.875"></a>
-<span class="sourceLineNo">876</span>   * &lt;li&gt;Start all other things such as chore services, etc&lt;/li&gt;<a name="line.876"></a>
-<span class="sourceLineNo">877</span>   * &lt;/ol&gt;<a name="line.877"></a>
-<span class="sourceLineNo">878</span>   * &lt;p/&gt;<a name="line.878"></a>
-<span class="sourceLineNo">879</span>   * Notice that now we will not schedule a special procedure to make meta online(unless the first<a name="line.879"></a>
-<span class="sourceLineNo">880</span>   * time where meta has not been created yet), we will rely on SCP to bring meta online.<a name="line.880"></a>
-<span class="sourceLineNo">881</span>   */<a name="line.881"></a>
-<span class="sourceLineNo">882</span>  private void finishActiveMasterInitialization(MonitoredTask status) throws IOException,<a name="line.882"></a>
-<span class="sourceLineNo">883</span>          InterruptedException, KeeperException, ReplicationException {<a name="line.883"></a>
-<span class="sourceLineNo">884</span>    Thread zombieDetector = new Thread(new InitializationMonitor(this),<a name="line.884"></a>
-<span class="sourceLineNo">885</span>        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());<a name="line.885"></a>
-<span class="sourceLineNo">886</span>    zombieDetector.setDaemon(true);<a name="line.886"></a>
-<span class="sourceLineNo">887</span>    zombieDetector.start();<a name="line.887"></a>
-<span class="sourceLineNo">888</span><a name="line.888"></a>
-<span class="sourceLineNo">889</span>    /*<a name="line.889"></a>
-<span class="sourceLineNo">890</span>     * We are active master now... go initialize components we need to run.<a name="line.890"></a>
-<span class="sourceLineNo">891</span>     */<a name="line.891"></a>
-<span class="sourceLineNo">892</span>    status.setStatus("Initializing Master file system");<a name="line.892"></a>
-<span class="sourceLineNo">893</span><a name="line.893"></a>
-<span class="sourceLineNo">894</span>    this.masterActiveTime = System.currentTimeMillis();<a name="line.894"></a>
-<span class="sourceLineNo">895</span>    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.<a name="line.895"></a>
-<span class="sourceLineNo">896</span>    // Initialize the chunkCreator<a name="line.896"></a>
-<span class="sourceLineNo">897</span>    initializeMemStoreChunkCreator();<a name="line.897"></a>
-<span class="sourceLineNo">898</span>    this.fileSystemManager = new MasterFileSystem(conf);<a name="line.898"></a>
-<span class="sourceLineNo">899</span>    this.walManager = new MasterWalManager(this);<a name="line.899"></a>
-<span class="sourceLineNo">900</span><a name="line.900"></a>
-<span class="sourceLineNo">901</span>    // enable table descriptors cache<a name="line.901"></a>
-<span class="sourceLineNo">902</span>    this.tableDescriptors.setCacheOn();<a name="line.902"></a>
-<span class="sourceLineNo">903</span><a name="line.903"></a>
-<span class="sourceLineNo">904</span>    // warm-up HTDs cache on master initialization<a name="line.904"></a>
-<span class="sourceLineNo">905</span>    if (preLoadTableDescriptors) {<a name="line.905"></a>
-<span class="sourceLineNo">906</span>      status.setStatus("Pre-loading table descriptors");<a name="line.906"></a>
-<span class="sourceLineNo">907</span>      this.tableDescriptors.getAll();<a name="line.907"></a>
-<span class="sourceLineNo">908</span>    }<a name="line.908"></a>
-<span class="sourceLineNo">909</span><a name="line.909"></a>
-<span class="sourceLineNo">910</span>    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but<a name="line.910"></a>
-<span class="sourceLineNo">911</span>    // only after it has checked in with the Master. At least a few tests ask Master for clusterId<a name="line.911"></a>
-<span class="sourceLineNo">912</span>    // before it has called its run method and before RegionServer has done the reportForDuty.<a name="line.912"></a>
-<span class="sourceLineNo">913</span>    ClusterId clusterId = fileSystemManager.getClusterId();<a name="line.913"></a>
-<span class="sourceLineNo">914</span>    status.setStatus("Publishing Cluster ID " + clusterId + " in ZooKeeper");<a name="line.914"></a>
-<span class="sourceLineNo">915</span>    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());<a name="line.915"></a>
-<span class="sourceLineNo">916</span>    this.clusterId = clusterId.toString();<a name="line.916"></a>
-<span class="sourceLineNo">917</span><a name="line.917"></a>
-<span class="sourceLineNo">918</span>    status.setStatus("Initialze ServerManager and schedule SCP for crash servers");<a name="line.918"></a>
-<span class="sourceLineNo">919</span>    this.serverManager = createServerManager(this);<a name="line.919"></a>
-<span class="sourceLineNo">920</span>    createProcedureExecutor();<a name="line.920"></a>
-<span class="sourceLineNo">921</span>    @SuppressWarnings("rawtypes")<a name="line.921"></a>
-<span class="sourceLineNo">922</span>    Map&lt;Class&lt;? extends Procedure&gt;, List&lt;Procedure&lt;MasterProcedureEnv&gt;&gt;&gt; procsByType =<a name="line.922"></a>
-<span class="sourceLineNo">923</span>      procedureExecutor.getActiveProceduresNoCopy().stream()<a name="line.923"></a>
-<span class="sourceLineNo">924</span>        .collect(Collectors.groupingBy(p -&gt; p.getClass()));<a name="line.924"></a>
-<span class="sourceLineNo">925</span><a name="line.925"></a>
-<span class="sourceLineNo">926</span>    checkUnsupportedProcedure(procsByType);<a name="line.926"></a>
-<span class="sourceLineNo">927</span><a name="line.927"></a>
-<span class="sourceLineNo">928</span>    // Create Assignment Manager<a name="line.928"></a>
-<span class="sourceLineNo">929</span>    this.assignmentManager = new AssignmentManager(this);<a name="line.929"></a>
-<span class="sourceLineNo">930</span>    this.assignmentManager.start();<a name="line.930"></a>
-<span class="sourceLineNo">931</span>    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as<a name="line.931"></a>
-<span class="sourceLineNo">932</span>    // completed, it could still be in the procedure list. This is a bit strange but is another<a name="line.932"></a>
-<span class="sourceLineNo">933</span>    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.<a name="line.933"></a>
-<span class="sourceLineNo">934</span>    List&lt;TransitRegionStateProcedure&gt; ritList =<a name="line.934"></a>
-<span class="sourceLineNo">935</span>      procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()<a name="line.935"></a>
-<span class="sourceLineNo">936</span>        .filter(p -&gt; !p.isFinished()).map(p -&gt; (TransitRegionStateProcedure) p)<a name="line.936"></a>
-<span class="sourceLineNo">937</span>        .collect(Collectors.toList());<a name="line.937"></a>
-<span class="sourceLineNo">938</span>    this.assignmentManager.setupRIT(ritList);<a name="line.938"></a>
-<span class="sourceLineNo">939</span><a name="line.939"></a>
-<span class="sourceLineNo">940</span>    this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);<a name="line.940"></a>
-<span class="sourceLineNo">941</span>    this.regionServerTracker.start(<a name="line.941"></a>
-<span class="sourceLineNo">942</span>      procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()<a name="line.942"></a>
-<span class="sourceLineNo">943</span>        .map(p -&gt; (ServerCrashProcedure) p).map(p -&gt; p.getServerName()).collect(Collectors.toSet()),<a name="line.943"></a>
-<span class="sourceLineNo">944</span>      walManager.getLiveServersFromWALDir());<a name="line.944"></a>
-<span class="sourceLineNo">945</span>    // This manager will be started AFTER hbase:meta is confirmed on line.<a name="line.945"></a>
-<span class="sourceLineNo">946</span>    // hbase.mirror.table.state.to.zookeeper is so hbase1 clients can connect. They read table<a name="line.946"></a>
-<span class="sourceLineNo">947</span>    // state from zookeeper while hbase2 reads it from hbase:meta. Disable if no hbase1 clients.<a name="line.947"></a>
-<span class="sourceLineNo">948</span>    this.tableStateManager =<a name="line.948"></a>
-<span class="sourceLineNo">949</span>      this.conf.getBoolean(MirroringTableStateManager.MIRROR_TABLE_STATE_TO_ZK_KEY, true)<a name="line.949"></a>
-<span class="sourceLineNo">950</span>        ?<a name="line.950"></a>
-<span class="sourceLineNo">951</span>        new MirroringTableStateManager(this):<a name="line.951"></a>
-<span class="sourceLineNo">952</span>        new TableStateManager(this);<a name="line.952"></a>
-<span class="sourceLineNo">953</span><a name="line.953"></a>
-<span class="sourceLineNo">954</span>    status.setStatus("Initializing ZK system trackers");<a name="line.954"></a>
-<span class="sourceLineNo">955</span>    initializeZKBasedSystemTrackers();<a name="line.955"></a>
-<span class="sourceLineNo">956</span>    status.setStatus("Loading last flushed sequence id of regions");<a name="line.956"></a>
-<span class="sourceLineNo">957</span>    try {<a name="line.957"></a>
-<span class="sourceLineNo">958</span>      this.serverManager.loadLastFlushedSequenceIds();<a name="line.958"></a>
-<span class="sourceLineNo">959</span>    } catch (IOException e) {<a name="line.959"></a>
-<span class="sourceLineNo">960</span>      LOG.debug("Failed to load last flushed sequence id of regions"<a name="line.960"></a>
-<span class="sourceLineNo">961</span>          + " from file system", e);<a name="line.961"></a>
-<span class="sourceLineNo">962</span>    }<a name="line.962"></a>
-<span class="sourceLineNo">963</span>    // Set ourselves as active Master now our claim has succeeded up in zk.<a name="line.963"></a>
-<span class="sourceLineNo">964</span>    this.activeMaster = true;<a name="line.964"></a>
-<span class="sourceLineNo">965</span><a name="line.965"></a>
-<span class="sourceLineNo">966</span>    // This is for backwards compatibility<a name="line.966"></a>
-<span class="sourceLineNo">967</span>    // See HBASE-11393<a name="line.967"></a>
-<span class="sourceLineNo">968</span>    status.setStatus("Update TableCFs node in ZNode");<a name="line.968"></a>
-<span class="sourceLineNo">969</span>    ReplicationPeerConfigUpgrader tableCFsUpdater =<a name="line.969"></a>
-<span class="sourceLineNo">970</span>        new ReplicationPeerConfigUpgrader(zooKeeper, conf);<a name="line.970"></a>
-<span class="sourceLineNo">971</span>    tableCFsUpdater.copyTableCFs();<a name="line.971"></a>
-<span class="sourceLineNo">972</span><a name="line.972"></a>
-<span class="sourceLineNo">973</span>    // Add the Observer to delete quotas on table deletion before starting all CPs by<a name="line.973"></a>
-<span class="sourceLineNo">974</span>    // default with quota support, avoiding if user specifically asks to not load this Observer.<a name="line.974"></a>
-<span class="sourceLineNo">975</span>    if (QuotaUtil.isQuotaEnabled(conf)) {<a name="line.975"></a>
-<span class="sourceLineNo">976</span>      updateConfigurationForQuotasObserver(conf);<a name="line.976"></a>
-<span class="sourceLineNo">977</span>    }<a name="line.977"></a>
-<span class="sourceLineNo">978</span>    // initialize master side coprocessors before we start handling requests<a name="line.978"></a>
-<span class="sourceLineNo">979</span>    status.setStatus("Initializing master coprocessors");<a name="line.979"></a>
-<span class="sourceLineNo">980</span>    this.cpHost = new MasterCoprocessorHost(this, this.conf);<a name="line.980"></a>
+<span class="sourceLineNo">196</span>import org.apache.hadoop.hbase.util.HBaseFsck;<a name="line.196"></a>
+<span class="sourceLineNo">197</span>import org.apache.hadoop.hbase.util.HFileArchiveUtil;<a name="line.197"></a>
+<span class="sourceLineNo">198</span>import org.apache.hadoop.hbase.util.HasThread;<a name="line.198"></a>
+<span class="sourceLineNo">199</span>import org.apache.hadoop.hbase.util.IdLock;<a name="line.199"></a>
+<span class="sourceLineNo">200</span>import org.apache.hadoop.hbase.util.ModifyRegionUtils;<a name="line.200"></a>
+<span class="sourceLineNo">201</span>import org.apache.hadoop.hbase.util.Pair;<a name="line.201"></a>
+<span class="sourceLineNo">202</span>import org.apache.hadoop.hbase.util.Threads;<a name="line.202"></a>
+<span class="sourceLineNo">203</span>import org.apache.hadoop.hbase.util.VersionInfo;<a name="line.203"></a>
+<span class="sourceLineNo">204</span>import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;<a name="line.204"></a>
+<span class="sourceLineNo">205</span>import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;<a name="line.205"></a>
+<span class="sourceLineNo">206</span>import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;<a name="line.206"></a>
+<span class="sourceLineNo">207</span>import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;<a name="line.207"></a>
+<span class="sourceLineNo">208</span>import org.apache.hadoop.hbase.zookeeper.ZKClusterId;<a name="line.208"></a>
+<span class="sourceLineNo">209</span>import org.apache.hadoop.hbase.zookeeper.ZKUtil;<a name="line.209"></a>
+<span class="sourceLineNo">210</span>import org.apache.hadoop.hbase.zookeeper.ZKWatcher;<a name="line.210"></a>
+<span class="sourceLineNo">211</span>import org.apache.hadoop.hbase.zookeeper.ZNodePaths;<a name="line.211"></a>
+<span class="sourceLineNo">212</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.212"></a>
+<span class="sourceLineNo">213</span>import org.apache.zookeeper.KeeperException;<a name="line.213"></a>
+<span class="sourceLineNo">214</span>import org.eclipse.jetty.server.Server;<a name="line.214"></a>
+<span class="sourceLineNo">215</span>import org.eclipse.jetty.server.ServerConnector;<a name="line.215"></a>
+<span class="sourceLineNo">216</span>import org.eclipse.jetty.servlet.ServletHolder;<a name="line.216"></a>
+<span class="sourceLineNo">217</span>import org.eclipse.jetty.webapp.WebAppContext;<a name="line.217"></a>
+<span class="sourceLineNo">218</span>import org.slf4j.Logger;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>import org.slf4j.LoggerFactory;<a name="line.219"></a>
+<span class="sourceLineNo">220</span><a name="line.220"></a>
+<span class="sourceLineNo">221</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.223"></a>
+<span class="sourceLineNo">224</span>import org.apache.hbase.thirdparty.com.google.common.collect.Maps;<a name="line.224"></a>
+<span class="sourceLineNo">225</span><a name="line.225"></a>
+<span class="sourceLineNo">226</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.226"></a>
+<span class="sourceLineNo">227</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;<a name="line.227"></a>
+<span class="sourceLineNo">228</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;<a name="line.228"></a>
+<span class="sourceLineNo">229</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;<a name="line.229"></a>
+<span class="sourceLineNo">230</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;<a name="line.231"></a>
+<span class="sourceLineNo">232</span><a name="line.232"></a>
+<span class="sourceLineNo">233</span>/**<a name="line.233"></a>
+<span class="sourceLineNo">234</span> * HMaster is the "master server" for HBase. An HBase cluster has one active<a name="line.234"></a>
+<span class="sourceLineNo">235</span> * master.  If many masters are started, all compete.  Whichever wins goes on to<a name="line.235"></a>
+<span class="sourceLineNo">236</span> * run the cluster.  All others park themselves in their constructor until<a name="line.236"></a>
+<span class="sourceLineNo">237</span> * master or cluster shutdown or until the active master loses its lease in<a name="line.237"></a>
+<span class="sourceLineNo">238</span> * zookeeper.  Thereafter, all running master jostle to take over master role.<a name="line.238"></a>
+<span class="sourceLineNo">239</span> *<a name="line.239"></a>
+<span class="sourceLineNo">240</span> * &lt;p&gt;The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In<a name="line.240"></a>
+<span class="sourceLineNo">241</span> * this case it will tell all regionservers to go down and then wait on them<a name="line.241"></a>
+<span class="sourceLineNo">242</span> * all reporting in that they are down.  This master will then shut itself down.<a name="line.242"></a>
+<span class="sourceLineNo">243</span> *<a name="line.243"></a>
+<span class="sourceLineNo">244</span> * &lt;p&gt;You can also shutdown just this master.  Call {@link #stopMaster()}.<a na

<TRUNCATED>
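
The HMaster class javadoc above explains that an operator can either shut down the whole
cluster (see #shutdown()) or stop just the active master (see #stopMaster()). As a minimal,
hedged sketch (not part of this commit), the snippet below shows how a client could trigger
either path through the public Admin API; the class name MasterShutdownExample is
hypothetical, and it assumes the target cluster's configuration (hbase-site.xml) is on the
classpath.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical example class, not part of HBase itself.
public class MasterShutdownExample {
  public static void main(String[] args) throws IOException {
    // Picks up hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the active master to stop; a backup master, if one is running, takes over.
      admin.stopMaster();
      // Alternatively, ask the master to shut down the whole cluster
      // (all region servers and then the master itself):
      // admin.shutdown();
    }
  }
}
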

[04/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the lock<a name="line.555"></a>
+<span class="sourceLineNo">556</spa

<TRUNCATED>
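
The hunk above shows HBaseFsck reading a handful of retry and timeout settings
when it creates and then waits on the hbck1 lock file (createLockRetryCounterFactory
and checkAndMarkRunningHbck). A minimal, hedged sketch of tuning those keys from
client code follows; the key names and values (the values match the defaults in the
code above) come from the diff, while the helper class name is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative helper, not part of HBaseFsck: collects the lock-file
    // retry/timeout keys read by the code in the hunk above.
    public class HbckLockTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Attempts and backoff used by createLockRetryCounterFactory
        conf.setInt("hbase.hbck.lockfile.attempts", 5);
        conf.setInt("hbase.hbck.lockfile.attempt.sleep.interval", 200);  // milliseconds
        conf.setInt("hbase.hbck.lockfile.attempt.maxsleeptime", 5000);   // milliseconds
        // How long checkAndMarkRunningHbck waits on the lock-file callable
        conf.setInt("hbase.hbck.lockfile.maxwaittime", 80);              // seconds
        return conf;
      }
    }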

[12/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

Posted by gi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -152,5137 +152,5182 @@
 <span class="sourceLineNo">144</span>import org.apache.hadoop.util.ReflectionUtils;<a name="line.144"></a>
 <span class="sourceLineNo">145</span>import org.apache.hadoop.util.Tool;<a name="line.145"></a>
 <span class="sourceLineNo">146</span>import org.apache.hadoop.util.ToolRunner;<a name="line.146"></a>
-<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.147"></a>
-<span class="sourceLineNo">148</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.148"></a>
-<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.149"></a>
-<span class="sourceLineNo">150</span>import org.apache.zookeeper.KeeperException;<a name="line.150"></a>
-<span class="sourceLineNo">151</span>import org.slf4j.Logger;<a name="line.151"></a>
-<span class="sourceLineNo">152</span>import org.slf4j.LoggerFactory;<a name="line.152"></a>
-<span class="sourceLineNo">153</span><a name="line.153"></a>
-<span class="sourceLineNo">154</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.154"></a>
-<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.155"></a>
-<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.156"></a>
-<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.157"></a>
-<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.158"></a>
-<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.159"></a>
-<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.160"></a>
-<span class="sourceLineNo">161</span><a name="line.161"></a>
-<span class="sourceLineNo">162</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.162"></a>
-<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.163"></a>
-<span class="sourceLineNo">164</span><a name="line.164"></a>
-<span class="sourceLineNo">165</span>/**<a name="line.165"></a>
-<span class="sourceLineNo">166</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.166"></a>
-<span class="sourceLineNo">167</span> * table integrity problems in a corrupted HBase.<a name="line.167"></a>
-<span class="sourceLineNo">168</span> * &lt;p&gt;<a name="line.168"></a>
-<span class="sourceLineNo">169</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.169"></a>
-<span class="sourceLineNo">170</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.170"></a>
-<span class="sourceLineNo">171</span> * accordance.<a name="line.171"></a>
+<span class="sourceLineNo">147</span>import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;<a name="line.147"></a>
+<span class="sourceLineNo">148</span>import org.apache.hbase.thirdparty.com.google.common.collect.Sets;<a name="line.148"></a>
+<span class="sourceLineNo">149</span>import org.apache.yetus.audience.InterfaceAudience;<a name="line.149"></a>
+<span class="sourceLineNo">150</span>import org.apache.yetus.audience.InterfaceStability;<a name="line.150"></a>
+<span class="sourceLineNo">151</span>import org.apache.zookeeper.KeeperException;<a name="line.151"></a>
+<span class="sourceLineNo">152</span>import org.slf4j.Logger;<a name="line.152"></a>
+<span class="sourceLineNo">153</span>import org.slf4j.LoggerFactory;<a name="line.153"></a>
+<span class="sourceLineNo">154</span><a name="line.154"></a>
+<span class="sourceLineNo">155</span>import org.apache.hbase.thirdparty.com.google.common.base.Joiner;<a name="line.155"></a>
+<span class="sourceLineNo">156</span>import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;<a name="line.156"></a>
+<span class="sourceLineNo">157</span>import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;<a name="line.157"></a>
+<span class="sourceLineNo">158</span>import org.apache.hbase.thirdparty.com.google.common.collect.Lists;<a name="line.158"></a>
+<span class="sourceLineNo">159</span>import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;<a name="line.159"></a>
+<span class="sourceLineNo">160</span>import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;<a name="line.160"></a>
+<span class="sourceLineNo">161</span>import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;<a name="line.161"></a>
+<span class="sourceLineNo">162</span><a name="line.162"></a>
+<span class="sourceLineNo">163</span>import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;<a name="line.163"></a>
+<span class="sourceLineNo">164</span>import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;<a name="line.164"></a>
+<span class="sourceLineNo">165</span><a name="line.165"></a>
+<span class="sourceLineNo">166</span>/**<a name="line.166"></a>
+<span class="sourceLineNo">167</span> * HBaseFsck (hbck) is a tool for checking and repairing region consistency and<a name="line.167"></a>
+<span class="sourceLineNo">168</span> * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not<a name="line.168"></a>
+<span class="sourceLineNo">169</span> * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.<a name="line.169"></a>
+<span class="sourceLineNo">170</span> * See hbck2 (HBASE-19121) for a hbck tool for hbase2.<a name="line.170"></a>
+<span class="sourceLineNo">171</span> *<a name="line.171"></a>
 <span class="sourceLineNo">172</span> * &lt;p&gt;<a name="line.172"></a>
-<span class="sourceLineNo">173</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.173"></a>
-<span class="sourceLineNo">174</span> * one region of a table.  This means there are no individual degenerate<a name="line.174"></a>
-<span class="sourceLineNo">175</span> * or backwards regions; no holes between regions; and that there are no<a name="line.175"></a>
-<span class="sourceLineNo">176</span> * overlapping regions.<a name="line.176"></a>
-<span class="sourceLineNo">177</span> * &lt;p&gt;<a name="line.177"></a>
-<span class="sourceLineNo">178</span> * The general repair strategy works in two phases:<a name="line.178"></a>
-<span class="sourceLineNo">179</span> * &lt;ol&gt;<a name="line.179"></a>
-<span class="sourceLineNo">180</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.180"></a>
-<span class="sourceLineNo">181</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.181"></a>
-<span class="sourceLineNo">182</span> * &lt;/ol&gt;<a name="line.182"></a>
-<span class="sourceLineNo">183</span> * &lt;p&gt;<a name="line.183"></a>
-<span class="sourceLineNo">184</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.184"></a>
-<span class="sourceLineNo">185</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.185"></a>
-<span class="sourceLineNo">186</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.186"></a>
-<span class="sourceLineNo">187</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.187"></a>
-<span class="sourceLineNo">188</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.188"></a>
-<span class="sourceLineNo">189</span> * a new region is created and all data is merged into the new region.<a name="line.189"></a>
-<span class="sourceLineNo">190</span> * &lt;p&gt;<a name="line.190"></a>
-<span class="sourceLineNo">191</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.191"></a>
-<span class="sourceLineNo">192</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.192"></a>
-<span class="sourceLineNo">193</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.193"></a>
-<span class="sourceLineNo">194</span> * an offline fashion.<a name="line.194"></a>
-<span class="sourceLineNo">195</span> * &lt;p&gt;<a name="line.195"></a>
-<span class="sourceLineNo">196</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.196"></a>
-<span class="sourceLineNo">197</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.197"></a>
-<span class="sourceLineNo">198</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.198"></a>
-<span class="sourceLineNo">199</span> * with proper state in the master.<a name="line.199"></a>
-<span class="sourceLineNo">200</span> * &lt;p&gt;<a name="line.200"></a>
-<span class="sourceLineNo">201</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.201"></a>
-<span class="sourceLineNo">202</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.202"></a>
-<span class="sourceLineNo">203</span> * first be called successfully.  Much of the region consistency information<a name="line.203"></a>
-<span class="sourceLineNo">204</span> * is transient and less risky to repair.<a name="line.204"></a>
-<span class="sourceLineNo">205</span> * &lt;p&gt;<a name="line.205"></a>
-<span class="sourceLineNo">206</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.206"></a>
-<span class="sourceLineNo">207</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.207"></a>
-<span class="sourceLineNo">208</span> * {@link #printUsageAndExit()} for more details.<a name="line.208"></a>
-<span class="sourceLineNo">209</span> */<a name="line.209"></a>
-<span class="sourceLineNo">210</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.210"></a>
-<span class="sourceLineNo">211</span>@InterfaceStability.Evolving<a name="line.211"></a>
-<span class="sourceLineNo">212</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.212"></a>
-<span class="sourceLineNo">213</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.213"></a>
-<span class="sourceLineNo">214</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.214"></a>
-<span class="sourceLineNo">215</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.215"></a>
-<span class="sourceLineNo">216</span>  private static boolean rsSupportsOffline = true;<a name="line.216"></a>
-<span class="sourceLineNo">217</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.217"></a>
-<span class="sourceLineNo">218</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.218"></a>
-<span class="sourceLineNo">219</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.219"></a>
-<span class="sourceLineNo">220</span>  private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.220"></a>
-<span class="sourceLineNo">221</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.221"></a>
-<span class="sourceLineNo">222</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.222"></a>
-<span class="sourceLineNo">223</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.223"></a>
-<span class="sourceLineNo">224</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.224"></a>
-<span class="sourceLineNo">225</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.225"></a>
-<span class="sourceLineNo">226</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.226"></a>
-<span class="sourceLineNo">227</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.227"></a>
-<span class="sourceLineNo">228</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.228"></a>
-<span class="sourceLineNo">229</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.229"></a>
-<span class="sourceLineNo">230</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.230"></a>
-<span class="sourceLineNo">231</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.231"></a>
-<span class="sourceLineNo">232</span><a name="line.232"></a>
-<span class="sourceLineNo">233</span>  /**********************<a name="line.233"></a>
-<span class="sourceLineNo">234</span>   * Internal resources<a name="line.234"></a>
-<span class="sourceLineNo">235</span>   **********************/<a name="line.235"></a>
-<span class="sourceLineNo">236</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.236"></a>
-<span class="sourceLineNo">237</span>  private ClusterMetrics status;<a name="line.237"></a>
-<span class="sourceLineNo">238</span>  private ClusterConnection connection;<a name="line.238"></a>
-<span class="sourceLineNo">239</span>  private Admin admin;<a name="line.239"></a>
-<span class="sourceLineNo">240</span>  private Table meta;<a name="line.240"></a>
-<span class="sourceLineNo">241</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.241"></a>
-<span class="sourceLineNo">242</span>  protected ExecutorService executor;<a name="line.242"></a>
-<span class="sourceLineNo">243</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.243"></a>
-<span class="sourceLineNo">244</span>  private HFileCorruptionChecker hfcc;<a name="line.244"></a>
-<span class="sourceLineNo">245</span>  private int retcode = 0;<a name="line.245"></a>
-<span class="sourceLineNo">246</span>  private Path HBCK_LOCK_PATH;<a name="line.246"></a>
-<span class="sourceLineNo">247</span>  private FSDataOutputStream hbckOutFd;<a name="line.247"></a>
-<span class="sourceLineNo">248</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.248"></a>
-<span class="sourceLineNo">249</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.249"></a>
-<span class="sourceLineNo">250</span>  // successful<a name="line.250"></a>
-<span class="sourceLineNo">251</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.251"></a>
-<span class="sourceLineNo">252</span><a name="line.252"></a>
-<span class="sourceLineNo">253</span>  // Unsupported options in HBase 2.0+<a name="line.253"></a>
-<span class="sourceLineNo">254</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.254"></a>
-<span class="sourceLineNo">255</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.255"></a>
-<span class="sourceLineNo">256</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.256"></a>
-<span class="sourceLineNo">257</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.257"></a>
-<span class="sourceLineNo">258</span><a name="line.258"></a>
-<span class="sourceLineNo">259</span>  /***********<a name="line.259"></a>
-<span class="sourceLineNo">260</span>   * Options<a name="line.260"></a>
-<span class="sourceLineNo">261</span>   ***********/<a name="line.261"></a>
-<span class="sourceLineNo">262</span>  private static boolean details = false; // do we display the full report<a name="line.262"></a>
-<span class="sourceLineNo">263</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.263"></a>
-<span class="sourceLineNo">264</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.264"></a>
-<span class="sourceLineNo">265</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.265"></a>
-<span class="sourceLineNo">266</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.266"></a>
-<span class="sourceLineNo">267</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.267"></a>
-<span class="sourceLineNo">268</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.268"></a>
-<span class="sourceLineNo">269</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.269"></a>
-<span class="sourceLineNo">270</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.270"></a>
-<span class="sourceLineNo">271</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.271"></a>
-<span class="sourceLineNo">272</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.272"></a>
-<span class="sourceLineNo">273</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.273"></a>
-<span class="sourceLineNo">274</span>  private boolean removeParents = false; // remove split parents<a name="line.274"></a>
-<span class="sourceLineNo">275</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.275"></a>
-<span class="sourceLineNo">276</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.276"></a>
-<span class="sourceLineNo">277</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.277"></a>
-<span class="sourceLineNo">278</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.278"></a>
-<span class="sourceLineNo">279</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.279"></a>
-<span class="sourceLineNo">280</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.280"></a>
-<span class="sourceLineNo">281</span><a name="line.281"></a>
-<span class="sourceLineNo">282</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.282"></a>
-<span class="sourceLineNo">283</span>  // hbase:meta are always checked<a name="line.283"></a>
-<span class="sourceLineNo">284</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.284"></a>
-<span class="sourceLineNo">285</span>  private TableName cleanReplicationBarrierTable;<a name="line.285"></a>
-<span class="sourceLineNo">286</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.286"></a>
-<span class="sourceLineNo">287</span>  // maximum number of overlapping regions to sideline<a name="line.287"></a>
-<span class="sourceLineNo">288</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.288"></a>
-<span class="sourceLineNo">289</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.289"></a>
-<span class="sourceLineNo">290</span>  private Path sidelineDir = null;<a name="line.290"></a>
-<span class="sourceLineNo">291</span><a name="line.291"></a>
-<span class="sourceLineNo">292</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.292"></a>
-<span class="sourceLineNo">293</span>  private static boolean summary = false; // if we want to print less output<a name="line.293"></a>
-<span class="sourceLineNo">294</span>  private boolean checkMetaOnly = false;<a name="line.294"></a>
-<span class="sourceLineNo">295</span>  private boolean checkRegionBoundaries = false;<a name="line.295"></a>
-<span class="sourceLineNo">296</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.296"></a>
-<span class="sourceLineNo">297</span><a name="line.297"></a>
-<span class="sourceLineNo">298</span>  /*********<a name="line.298"></a>
-<span class="sourceLineNo">299</span>   * State<a name="line.299"></a>
-<span class="sourceLineNo">300</span>   *********/<a name="line.300"></a>
-<span class="sourceLineNo">301</span>  final private ErrorReporter errors;<a name="line.301"></a>
-<span class="sourceLineNo">302</span>  int fixes = 0;<a name="line.302"></a>
-<span class="sourceLineNo">303</span><a name="line.303"></a>
-<span class="sourceLineNo">304</span>  /**<a name="line.304"></a>
-<span class="sourceLineNo">305</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.305"></a>
-<span class="sourceLineNo">306</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.306"></a>
-<span class="sourceLineNo">307</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.307"></a>
-<span class="sourceLineNo">308</span>   */<a name="line.308"></a>
-<span class="sourceLineNo">309</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.309"></a>
-<span class="sourceLineNo">310</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.310"></a>
-<span class="sourceLineNo">311</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.311"></a>
+<span class="sourceLineNo">173</span> * Region consistency checks verify that hbase:meta, region deployment on region<a name="line.173"></a>
+<span class="sourceLineNo">174</span> * servers and the state of data in HDFS (.regioninfo files) all are in<a name="line.174"></a>
+<span class="sourceLineNo">175</span> * accordance.<a name="line.175"></a>
+<span class="sourceLineNo">176</span> * &lt;p&gt;<a name="line.176"></a>
+<span class="sourceLineNo">177</span> * Table integrity checks verify that all possible row keys resolve to exactly<a name="line.177"></a>
+<span class="sourceLineNo">178</span> * one region of a table.  This means there are no individual degenerate<a name="line.178"></a>
+<span class="sourceLineNo">179</span> * or backwards regions; no holes between regions; and that there are no<a name="line.179"></a>
+<span class="sourceLineNo">180</span> * overlapping regions.<a name="line.180"></a>
+<span class="sourceLineNo">181</span> * &lt;p&gt;<a name="line.181"></a>
+<span class="sourceLineNo">182</span> * The general repair strategy works in two phases:<a name="line.182"></a>
+<span class="sourceLineNo">183</span> * &lt;ol&gt;<a name="line.183"></a>
+<span class="sourceLineNo">184</span> * &lt;li&gt; Repair Table Integrity on HDFS. (merge or fabricate regions)<a name="line.184"></a>
+<span class="sourceLineNo">185</span> * &lt;li&gt; Repair Region Consistency with hbase:meta and assignments<a name="line.185"></a>
+<span class="sourceLineNo">186</span> * &lt;/ol&gt;<a name="line.186"></a>
+<span class="sourceLineNo">187</span> * &lt;p&gt;<a name="line.187"></a>
+<span class="sourceLineNo">188</span> * For table integrity repairs, the tables' region directories are scanned<a name="line.188"></a>
+<span class="sourceLineNo">189</span> * for .regioninfo files.  Each table's integrity is then verified.  If there<a name="line.189"></a>
+<span class="sourceLineNo">190</span> * are any orphan regions (regions with no .regioninfo files) or holes, new<a name="line.190"></a>
+<span class="sourceLineNo">191</span> * regions are fabricated.  Backwards regions are sidelined as well as empty<a name="line.191"></a>
+<span class="sourceLineNo">192</span> * degenerate (endkey==startkey) regions.  If there are any overlapping regions,<a name="line.192"></a>
+<span class="sourceLineNo">193</span> * a new region is created and all data is merged into the new region.<a name="line.193"></a>
+<span class="sourceLineNo">194</span> * &lt;p&gt;<a name="line.194"></a>
+<span class="sourceLineNo">195</span> * Table integrity repairs deal solely with HDFS and could potentially be done<a name="line.195"></a>
+<span class="sourceLineNo">196</span> * offline -- the hbase region servers or master do not need to be running.<a name="line.196"></a>
+<span class="sourceLineNo">197</span> * This phase can eventually be used to completely reconstruct the hbase:meta table in<a name="line.197"></a>
+<span class="sourceLineNo">198</span> * an offline fashion.<a name="line.198"></a>
+<span class="sourceLineNo">199</span> * &lt;p&gt;<a name="line.199"></a>
+<span class="sourceLineNo">200</span> * Region consistency requires three conditions -- 1) valid .regioninfo file<a name="line.200"></a>
+<span class="sourceLineNo">201</span> * present in an HDFS region dir,  2) valid row with .regioninfo data in META,<a name="line.201"></a>
+<span class="sourceLineNo">202</span> * and 3) a region is deployed only at the regionserver that was assigned to<a name="line.202"></a>
+<span class="sourceLineNo">203</span> * with proper state in the master.<a name="line.203"></a>
+<span class="sourceLineNo">204</span> * &lt;p&gt;<a name="line.204"></a>
+<span class="sourceLineNo">205</span> * Region consistency repairs require hbase to be online so that hbck can<a name="line.205"></a>
+<span class="sourceLineNo">206</span> * contact the HBase master and region servers.  The hbck#connect() method must<a name="line.206"></a>
+<span class="sourceLineNo">207</span> * first be called successfully.  Much of the region consistency information<a name="line.207"></a>
+<span class="sourceLineNo">208</span> * is transient and less risky to repair.<a name="line.208"></a>
+<span class="sourceLineNo">209</span> * &lt;p&gt;<a name="line.209"></a>
+<span class="sourceLineNo">210</span> * If hbck is run from the command line, there are a handful of arguments that<a name="line.210"></a>
+<span class="sourceLineNo">211</span> * can be used to limit the kinds of repairs hbck will do.  See the code in<a name="line.211"></a>
+<span class="sourceLineNo">212</span> * {@link #printUsageAndExit()} for more details.<a name="line.212"></a>
+<span class="sourceLineNo">213</span> */<a name="line.213"></a>
+<span class="sourceLineNo">214</span>@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)<a name="line.214"></a>
+<span class="sourceLineNo">215</span>@InterfaceStability.Evolving<a name="line.215"></a>
+<span class="sourceLineNo">216</span>public class HBaseFsck extends Configured implements Closeable {<a name="line.216"></a>
+<span class="sourceLineNo">217</span>  public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute<a name="line.217"></a>
+<span class="sourceLineNo">218</span>  public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;<a name="line.218"></a>
+<span class="sourceLineNo">219</span>  private static final int MAX_NUM_THREADS = 50; // #threads to contact regions<a name="line.219"></a>
+<span class="sourceLineNo">220</span>  private static boolean rsSupportsOffline = true;<a name="line.220"></a>
+<span class="sourceLineNo">221</span>  private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;<a name="line.221"></a>
+<span class="sourceLineNo">222</span>  private static final int DEFAULT_MAX_MERGE = 5;<a name="line.222"></a>
+<span class="sourceLineNo">223</span>  private static final String TO_BE_LOADED = "to_be_loaded";<a name="line.223"></a>
+<span class="sourceLineNo">224</span>  /**<a name="line.224"></a>
+<span class="sourceLineNo">225</span>   * Here is where hbase-1.x used to default the lock for hbck1.<a name="line.225"></a>
+<span class="sourceLineNo">226</span>   * It puts in place a lock when it goes to write/make changes.<a name="line.226"></a>
+<span class="sourceLineNo">227</span>   */<a name="line.227"></a>
+<span class="sourceLineNo">228</span>  @VisibleForTesting<a name="line.228"></a>
+<span class="sourceLineNo">229</span>  public static final String HBCK_LOCK_FILE = "hbase-hbck.lock";<a name="line.229"></a>
+<span class="sourceLineNo">230</span>  private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;<a name="line.230"></a>
+<span class="sourceLineNo">231</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.231"></a>
+<span class="sourceLineNo">232</span>  private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.232"></a>
+<span class="sourceLineNo">233</span>  // We have to set the timeout value &gt; HdfsConstants.LEASE_SOFTLIMIT_PERIOD.<a name="line.233"></a>
+<span class="sourceLineNo">234</span>  // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for<a name="line.234"></a>
+<span class="sourceLineNo">235</span>  // AlreadyBeingCreatedException which is implies timeout on this operations up to<a name="line.235"></a>
+<span class="sourceLineNo">236</span>  // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).<a name="line.236"></a>
+<span class="sourceLineNo">237</span>  private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds<a name="line.237"></a>
+<span class="sourceLineNo">238</span>  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;<a name="line.238"></a>
+<span class="sourceLineNo">239</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds<a name="line.239"></a>
+<span class="sourceLineNo">240</span>  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds<a name="line.240"></a>
+<span class="sourceLineNo">241</span><a name="line.241"></a>
+<span class="sourceLineNo">242</span>  /**********************<a name="line.242"></a>
+<span class="sourceLineNo">243</span>   * Internal resources<a name="line.243"></a>
+<span class="sourceLineNo">244</span>   **********************/<a name="line.244"></a>
+<span class="sourceLineNo">245</span>  private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName());<a name="line.245"></a>
+<span class="sourceLineNo">246</span>  private ClusterMetrics status;<a name="line.246"></a>
+<span class="sourceLineNo">247</span>  private ClusterConnection connection;<a name="line.247"></a>
+<span class="sourceLineNo">248</span>  private Admin admin;<a name="line.248"></a>
+<span class="sourceLineNo">249</span>  private Table meta;<a name="line.249"></a>
+<span class="sourceLineNo">250</span>  // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions<a name="line.250"></a>
+<span class="sourceLineNo">251</span>  protected ExecutorService executor;<a name="line.251"></a>
+<span class="sourceLineNo">252</span>  private long startMillis = EnvironmentEdgeManager.currentTime();<a name="line.252"></a>
+<span class="sourceLineNo">253</span>  private HFileCorruptionChecker hfcc;<a name="line.253"></a>
+<span class="sourceLineNo">254</span>  private int retcode = 0;<a name="line.254"></a>
+<span class="sourceLineNo">255</span>  private Path HBCK_LOCK_PATH;<a name="line.255"></a>
+<span class="sourceLineNo">256</span>  private FSDataOutputStream hbckOutFd;<a name="line.256"></a>
+<span class="sourceLineNo">257</span>  // This lock is to prevent cleanup of balancer resources twice between<a name="line.257"></a>
+<span class="sourceLineNo">258</span>  // ShutdownHook and the main code. We cleanup only if the connect() is<a name="line.258"></a>
+<span class="sourceLineNo">259</span>  // successful<a name="line.259"></a>
+<span class="sourceLineNo">260</span>  private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);<a name="line.260"></a>
+<span class="sourceLineNo">261</span><a name="line.261"></a>
+<span class="sourceLineNo">262</span>  // Unsupported options in HBase 2.0+<a name="line.262"></a>
+<span class="sourceLineNo">263</span>  private static final Set&lt;String&gt; unsupportedOptionsInV2 = Sets.newHashSet("-fix",<a name="line.263"></a>
+<span class="sourceLineNo">264</span>      "-fixAssignments", "-fixMeta", "-fixHdfsHoles", "-fixHdfsOrphans", "-fixTableOrphans",<a name="line.264"></a>
+<span class="sourceLineNo">265</span>      "-fixHdfsOverlaps", "-sidelineBigOverlaps", "-fixSplitParents", "-removeParents",<a name="line.265"></a>
+<span class="sourceLineNo">266</span>      "-fixEmptyMetaCells", "-repair", "-repairHoles", "-maxOverlapsToSideline", "-maxMerge");<a name="line.266"></a>
+<span class="sourceLineNo">267</span><a name="line.267"></a>
+<span class="sourceLineNo">268</span>  /***********<a name="line.268"></a>
+<span class="sourceLineNo">269</span>   * Options<a name="line.269"></a>
+<span class="sourceLineNo">270</span>   ***********/<a name="line.270"></a>
+<span class="sourceLineNo">271</span>  private static boolean details = false; // do we display the full report<a name="line.271"></a>
+<span class="sourceLineNo">272</span>  private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older<a name="line.272"></a>
+<span class="sourceLineNo">273</span>  private static boolean forceExclusive = false; // only this hbck can modify HBase<a name="line.273"></a>
+<span class="sourceLineNo">274</span>  private boolean fixAssignments = false; // fix assignment errors?<a name="line.274"></a>
+<span class="sourceLineNo">275</span>  private boolean fixMeta = false; // fix meta errors?<a name="line.275"></a>
+<span class="sourceLineNo">276</span>  private boolean checkHdfs = true; // load and check fs consistency?<a name="line.276"></a>
+<span class="sourceLineNo">277</span>  private boolean fixHdfsHoles = false; // fix fs holes?<a name="line.277"></a>
+<span class="sourceLineNo">278</span>  private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)<a name="line.278"></a>
+<span class="sourceLineNo">279</span>  private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)<a name="line.279"></a>
+<span class="sourceLineNo">280</span>  private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)<a name="line.280"></a>
+<span class="sourceLineNo">281</span>  private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs<a name="line.281"></a>
+<span class="sourceLineNo">282</span>  private boolean fixSplitParents = false; // fix lingering split parents<a name="line.282"></a>
+<span class="sourceLineNo">283</span>  private boolean removeParents = false; // remove split parents<a name="line.283"></a>
+<span class="sourceLineNo">284</span>  private boolean fixReferenceFiles = false; // fix lingering reference store file<a name="line.284"></a>
+<span class="sourceLineNo">285</span>  private boolean fixHFileLinks = false; // fix lingering HFileLinks<a name="line.285"></a>
+<span class="sourceLineNo">286</span>  private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows<a name="line.286"></a>
+<span class="sourceLineNo">287</span>  private boolean fixReplication = false; // fix undeleted replication queues for removed peer<a name="line.287"></a>
+<span class="sourceLineNo">288</span>  private boolean cleanReplicationBarrier = false; // clean replication barriers of a table<a name="line.288"></a>
+<span class="sourceLineNo">289</span>  private boolean fixAny = false; // Set to true if any of the fix is required.<a name="line.289"></a>
+<span class="sourceLineNo">290</span><a name="line.290"></a>
+<span class="sourceLineNo">291</span>  // limit checking/fixes to listed tables, if empty attempt to check/fix all<a name="line.291"></a>
+<span class="sourceLineNo">292</span>  // hbase:meta are always checked<a name="line.292"></a>
+<span class="sourceLineNo">293</span>  private Set&lt;TableName&gt; tablesIncluded = new HashSet&lt;&gt;();<a name="line.293"></a>
+<span class="sourceLineNo">294</span>  private TableName cleanReplicationBarrierTable;<a name="line.294"></a>
+<span class="sourceLineNo">295</span>  private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge<a name="line.295"></a>
+<span class="sourceLineNo">296</span>  // maximum number of overlapping regions to sideline<a name="line.296"></a>
+<span class="sourceLineNo">297</span>  private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;<a name="line.297"></a>
+<span class="sourceLineNo">298</span>  private boolean sidelineBigOverlaps = false; // sideline overlaps with &gt;maxMerge regions<a name="line.298"></a>
+<span class="sourceLineNo">299</span>  private Path sidelineDir = null;<a name="line.299"></a>
+<span class="sourceLineNo">300</span><a name="line.300"></a>
+<span class="sourceLineNo">301</span>  private boolean rerun = false; // if we tried to fix something, rerun hbck<a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private static boolean summary = false; // if we want to print less output<a name="line.302"></a>
+<span class="sourceLineNo">303</span>  private boolean checkMetaOnly = false;<a name="line.303"></a>
+<span class="sourceLineNo">304</span>  private boolean checkRegionBoundaries = false;<a name="line.304"></a>
+<span class="sourceLineNo">305</span>  private boolean ignorePreCheckPermission = false; // if pre-check permission<a name="line.305"></a>
+<span class="sourceLineNo">306</span><a name="line.306"></a>
+<span class="sourceLineNo">307</span>  /*********<a name="line.307"></a>
+<span class="sourceLineNo">308</span>   * State<a name="line.308"></a>
+<span class="sourceLineNo">309</span>   *********/<a name="line.309"></a>
+<span class="sourceLineNo">310</span>  final private ErrorReporter errors;<a name="line.310"></a>
+<span class="sourceLineNo">311</span>  int fixes = 0;<a name="line.311"></a>
 <span class="sourceLineNo">312</span><a name="line.312"></a>
 <span class="sourceLineNo">313</span>  /**<a name="line.313"></a>
-<span class="sourceLineNo">314</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.314"></a>
-<span class="sourceLineNo">315</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.315"></a>
-<span class="sourceLineNo">316</span>   * to prevent dupes.<a name="line.316"></a>
-<span class="sourceLineNo">317</span>   *<a name="line.317"></a>
-<span class="sourceLineNo">318</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.318"></a>
-<span class="sourceLineNo">319</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.319"></a>
-<span class="sourceLineNo">320</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.320"></a>
-<span class="sourceLineNo">321</span>   * the meta table<a name="line.321"></a>
-<span class="sourceLineNo">322</span>   */<a name="line.322"></a>
-<span class="sourceLineNo">323</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.323"></a>
-<span class="sourceLineNo">324</span><a name="line.324"></a>
-<span class="sourceLineNo">325</span>  /**<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.326"></a>
-<span class="sourceLineNo">327</span>   */<a name="line.327"></a>
-<span class="sourceLineNo">328</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.328"></a>
-<span class="sourceLineNo">329</span><a name="line.329"></a>
-<span class="sourceLineNo">330</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.330"></a>
-<span class="sourceLineNo">331</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.331"></a>
-<span class="sourceLineNo">332</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.332"></a>
-<span class="sourceLineNo">333</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.333"></a>
-<span class="sourceLineNo">334</span><a name="line.334"></a>
-<span class="sourceLineNo">335</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.335"></a>
-<span class="sourceLineNo">336</span><a name="line.336"></a>
-<span class="sourceLineNo">337</span>  private ZKWatcher zkw = null;<a name="line.337"></a>
-<span class="sourceLineNo">338</span>  private String hbckEphemeralNodePath = null;<a name="line.338"></a>
-<span class="sourceLineNo">339</span>  private boolean hbckZodeCreated = false;<a name="line.339"></a>
-<span class="sourceLineNo">340</span><a name="line.340"></a>
-<span class="sourceLineNo">341</span>  /**<a name="line.341"></a>
-<span class="sourceLineNo">342</span>   * Constructor<a name="line.342"></a>
-<span class="sourceLineNo">343</span>   *<a name="line.343"></a>
-<span class="sourceLineNo">344</span>   * @param conf Configuration object<a name="line.344"></a>
-<span class="sourceLineNo">345</span>   * @throws MasterNotRunningException if the master is not running<a name="line.345"></a>
-<span class="sourceLineNo">346</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.346"></a>
-<span class="sourceLineNo">347</span>   */<a name="line.347"></a>
-<span class="sourceLineNo">348</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.348"></a>
-<span class="sourceLineNo">349</span>    this(conf, createThreadPool(conf));<a name="line.349"></a>
-<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
-<span class="sourceLineNo">351</span><a name="line.351"></a>
-<span class="sourceLineNo">352</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.354"></a>
-<span class="sourceLineNo">355</span>  }<a name="line.355"></a>
-<span class="sourceLineNo">356</span><a name="line.356"></a>
-<span class="sourceLineNo">357</span>  /**<a name="line.357"></a>
-<span class="sourceLineNo">358</span>   * Constructor<a name="line.358"></a>
-<span class="sourceLineNo">359</span>   *<a name="line.359"></a>
-<span class="sourceLineNo">360</span>   * @param conf<a name="line.360"></a>
-<span class="sourceLineNo">361</span>   *          Configuration object<a name="line.361"></a>
-<span class="sourceLineNo">362</span>   * @throws MasterNotRunningException<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   *           if the master is not running<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @throws ZooKeeperConnectionException<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   *           if unable to connect to ZooKeeper<a name="line.365"></a>
-<span class="sourceLineNo">366</span>   */<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.367"></a>
-<span class="sourceLineNo">368</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.368"></a>
-<span class="sourceLineNo">369</span>    super(conf);<a name="line.369"></a>
-<span class="sourceLineNo">370</span>    errors = getErrorReporter(getConf());<a name="line.370"></a>
-<span class="sourceLineNo">371</span>    this.executor = exec;<a name="line.371"></a>
-<span class="sourceLineNo">372</span>    lockFileRetryCounterFactory = new RetryCounterFactory(<a name="line.372"></a>
-<span class="sourceLineNo">373</span>      getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.373"></a>
-<span class="sourceLineNo">374</span>      getConf().getInt(<a name="line.374"></a>
-<span class="sourceLineNo">375</span>        "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.375"></a>
-<span class="sourceLineNo">376</span>      getConf().getInt(<a name="line.376"></a>
-<span class="sourceLineNo">377</span>        "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    createZNodeRetryCounterFactory = new RetryCounterFactory(<a name="line.378"></a>
-<span class="sourceLineNo">379</span>      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.379"></a>
-<span class="sourceLineNo">380</span>      getConf().getInt(<a name="line.380"></a>
-<span class="sourceLineNo">381</span>        "hbase.hbck.createznode.attempt.sleep.interval",<a name="line.381"></a>
-<span class="sourceLineNo">382</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.382"></a>
-<span class="sourceLineNo">383</span>      getConf().getInt(<a name="line.383"></a>
-<span class="sourceLineNo">384</span>        "hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.384"></a>
-<span class="sourceLineNo">385</span>        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.385"></a>
-<span class="sourceLineNo">386</span>    zkw = createZooKeeperWatcher();<a name="line.386"></a>
-<span class="sourceLineNo">387</span>  }<a name="line.387"></a>
-<span class="sourceLineNo">388</span><a name="line.388"></a>
-<span class="sourceLineNo">389</span>  private class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.389"></a>
-<span class="sourceLineNo">390</span>    RetryCounter retryCounter;<a name="line.390"></a>
-<span class="sourceLineNo">391</span><a name="line.391"></a>
-<span class="sourceLineNo">392</span>    public FileLockCallable(RetryCounter retryCounter) {<a name="line.392"></a>
-<span class="sourceLineNo">393</span>      this.retryCounter = retryCounter;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    }<a name="line.394"></a>
-<span class="sourceLineNo">395</span>    @Override<a name="line.395"></a>
-<span class="sourceLineNo">396</span>    public FSDataOutputStream call() throws IOException {<a name="line.396"></a>
-<span class="sourceLineNo">397</span>      try {<a name="line.397"></a>
-<span class="sourceLineNo">398</span>        FileSystem fs = FSUtils.getCurrentFileSystem(getConf());<a name="line.398"></a>
-<span class="sourceLineNo">399</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),<a name="line.399"></a>
-<span class="sourceLineNo">400</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.400"></a>
-<span class="sourceLineNo">401</span>        Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.401"></a>
-<span class="sourceLineNo">402</span>        fs.mkdirs(tmpDir);<a name="line.402"></a>
-<span class="sourceLineNo">403</span>        HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.403"></a>
-<span class="sourceLineNo">404</span>        final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);<a name="line.404"></a>
-<span class="sourceLineNo">405</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.405"></a>
-<span class="sourceLineNo">406</span>        out.flush();<a name="line.406"></a>
-<span class="sourceLineNo">407</span>        return out;<a name="line.407"></a>
-<span class="sourceLineNo">408</span>      } catch(RemoteException e) {<a name="line.408"></a>
-<span class="sourceLineNo">409</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.409"></a>
-<span class="sourceLineNo">410</span>          return null;<a name="line.410"></a>
-<span class="sourceLineNo">411</span>        } else {<a name="line.411"></a>
-<span class="sourceLineNo">412</span>          throw e;<a name="line.412"></a>
-<span class="sourceLineNo">413</span>        }<a name="line.413"></a>
-<span class="sourceLineNo">414</span>      }<a name="line.414"></a>
-<span class="sourceLineNo">415</span>    }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.417"></a>
-<span class="sourceLineNo">418</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.418"></a>
-<span class="sourceLineNo">419</span>        throws IOException {<a name="line.419"></a>
-<span class="sourceLineNo">420</span><a name="line.420"></a>
-<span class="sourceLineNo">421</span>      IOException exception = null;<a name="line.421"></a>
-<span class="sourceLineNo">422</span>      do {<a name="line.422"></a>
-<span class="sourceLineNo">423</span>        try {<a name="line.423"></a>
-<span class="sourceLineNo">424</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.424"></a>
-<span class="sourceLineNo">425</span>        } catch (IOException ioe) {<a name="line.425"></a>
-<span class="sourceLineNo">426</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.426"></a>
-<span class="sourceLineNo">427</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.427"></a>
-<span class="sourceLineNo">428</span>              + retryCounter.getMaxAttempts());<a name="line.428"></a>
-<span class="sourceLineNo">429</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.429"></a>
-<span class="sourceLineNo">430</span>              ioe);<a name="line.430"></a>
-<span class="sourceLineNo">431</span>          try {<a name="line.431"></a>
-<span class="sourceLineNo">432</span>            exception = ioe;<a name="line.432"></a>
-<span class="sourceLineNo">433</span>            retryCounter.sleepUntilNextRetry();<a name="line.433"></a>
-<span class="sourceLineNo">434</span>          } catch (InterruptedException ie) {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.435"></a>
-<span class="sourceLineNo">436</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.436"></a>
-<span class="sourceLineNo">437</span>            .initCause(ie);<a name="line.437"></a>
-<span class="sourceLineNo">438</span>          }<a name="line.438"></a>
-<span class="sourceLineNo">439</span>        }<a name="line.439"></a>
-<span class="sourceLineNo">440</span>      } while (retryCounter.shouldRetry());<a name="line.440"></a>
-<span class="sourceLineNo">441</span><a name="line.441"></a>
-<span class="sourceLineNo">442</span>      throw exception;<a name="line.442"></a>
-<span class="sourceLineNo">443</span>    }<a name="line.443"></a>
-<span class="sourceLineNo">444</span>  }<a name="line.444"></a>
-<span class="sourceLineNo">445</span><a name="line.445"></a>
-<span class="sourceLineNo">446</span>  /**<a name="line.446"></a>
-<span class="sourceLineNo">447</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.447"></a>
-<span class="sourceLineNo">448</span>   *<a name="line.448"></a>
-<span class="sourceLineNo">449</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.449"></a>
-<span class="sourceLineNo">450</span>   * @throws IOException if IO failure occurs<a name="line.450"></a>
-<span class="sourceLineNo">451</span>   */<a name="line.451"></a>
-<span class="sourceLineNo">452</span>  private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {<a name="line.452"></a>
-<span class="sourceLineNo">453</span>    RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.453"></a>
-<span class="sourceLineNo">454</span>    FileLockCallable callable = new FileLockCallable(retryCounter);<a name="line.454"></a>
-<span class="sourceLineNo">455</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.455"></a>
-<span class="sourceLineNo">456</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.456"></a>
-<span class="sourceLineNo">457</span>    executor.execute(futureTask);<a name="line.457"></a>
-<span class="sourceLineNo">458</span>    final int timeoutInSeconds = getConf().getInt(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.459"></a>
-<span class="sourceLineNo">460</span>    FSDataOutputStream stream = null;<a name="line.460"></a>
-<span class="sourceLineNo">461</span>    try {<a name="line.461"></a>
-<span class="sourceLineNo">462</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.462"></a>
-<span class="sourceLineNo">463</span>    } catch (ExecutionException ee) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.464"></a>
-<span class="sourceLineNo">465</span>    } catch (InterruptedException ie) {<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.466"></a>
-<span class="sourceLineNo">467</span>      Thread.currentThread().interrupt();<a name="line.467"></a>
-<span class="sourceLineNo">468</span>    } catch (TimeoutException exception) {<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      // took too long to obtain lock<a name="line.469"></a>
-<span class="sourceLineNo">470</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.470"></a>
-<span class="sourceLineNo">471</span>      futureTask.cancel(true);<a name="line.471"></a>
-<span class="sourceLineNo">472</span>    } finally {<a name="line.472"></a>
-<span class="sourceLineNo">473</span>      executor.shutdownNow();<a name="line.473"></a>
-<span class="sourceLineNo">474</span>    }<a name="line.474"></a>
-<span class="sourceLineNo">475</span>    return stream;<a name="line.475"></a>
-<span class="sourceLineNo">476</span>  }<a name="line.476"></a>
-<span class="sourceLineNo">477</span><a name="line.477"></a>
-<span class="sourceLineNo">478</span>  private void unlockHbck() {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      do {<a name="line.481"></a>
-<span class="sourceLineNo">482</span>        try {<a name="line.482"></a>
-<span class="sourceLineNo">483</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.483"></a>
-<span class="sourceLineNo">484</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),<a name="line.484"></a>
-<span class="sourceLineNo">485</span>              HBCK_LOCK_PATH, true);<a name="line.485"></a>
-<span class="sourceLineNo">486</span>          LOG.info("Finishing hbck");<a name="line.486"></a>
-<span class="sourceLineNo">487</span>          return;<a name="line.487"></a>
-<span class="sourceLineNo">488</span>        } catch (IOException ioe) {<a name="line.488"></a>
-<span class="sourceLineNo">489</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.489"></a>
-<span class="sourceLineNo">490</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.490"></a>
-<span class="sourceLineNo">491</span>              + retryCounter.getMaxAttempts());<a name="line.491"></a>
-<span class="sourceLineNo">492</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.492"></a>
-<span class="sourceLineNo">493</span>          try {<a name="line.493"></a>
-<span class="sourceLineNo">494</span>            retryCounter.sleepUntilNextRetry();<a name="line.494"></a>
-<span class="sourceLineNo">495</span>          } catch (InterruptedException ie) {<a name="line.495"></a>
-<span class="sourceLineNo">496</span>            Thread.currentThread().interrupt();<a name="line.496"></a>
-<span class="sourceLineNo">497</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.497"></a>
-<span class="sourceLineNo">498</span>                HBCK_LOCK_PATH);<a name="line.498"></a>
-<span class="sourceLineNo">499</span>            return;<a name="line.499"></a>
-<span class="sourceLineNo">500</span>          }<a name="line.500"></a>
-<span class="sourceLineNo">501</span>        }<a name="line.501"></a>
-<span class="sourceLineNo">502</span>      } while (retryCounter.shouldRetry());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
-<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
-<span class="sourceLineNo">505</span><a name="line.505"></a>
-<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   * online state.<a name="line.508"></a>
-<span class="sourceLineNo">509</span>   */<a name="line.509"></a>
-<span class="sourceLineNo">510</span>  public void connect() throws IOException {<a name="line.510"></a>
-<span class="sourceLineNo">511</span><a name="line.511"></a>
-<span class="sourceLineNo">512</span>    if (isExclusive()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>      // Grab the lock<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      hbckOutFd = checkAndMarkRunningHbck();<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      if (hbckOutFd == null) {<a name="line.515"></a>
-<span class="sourceLineNo">516</span>        setRetCode(-1);<a name="line.516"></a>
-<span class="sourceLineNo">517</span>        LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +<a name="line.517"></a>
-<span class="sourceLineNo">518</span>            "[If you are sure no other instance is running, delete the lock file " +<a name="line.518"></a>
-<span class="sourceLineNo">519</span>            HBCK_LOCK_PATH + " and rerun the tool]");<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        throw new IOException("Duplicate hbck - Abort");<a name="line.520"></a>
-<span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span><a name="line.522"></a>
-<span class="sourceLineNo">523</span>      // Make sure to cleanup the lock<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      hbckLockCleanup.set(true);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span><a name="line.527"></a>
-<span class="sourceLineNo">528</span>    // Add a shutdown hook to this thread, in case user tries to<a name="line.528"></a>
-<span class="sourceLineNo">529</span>    // kill the hbck with a ctrl-c, we want to cleanup the lock so that<a name="line.529"></a>
-<span class="sourceLineNo">530</span>    // it is available for further calls<a name="line.530"></a>
-<span class="sourceLineNo">531</span>    Runtime.getRuntime().addShutdownHook(new Thread() {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>      @Override<a name="line.532"></a>
-<span class="sourceLineNo">533</span>      public void run() {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>        IOUtils.closeQuietly(HBaseFsck.this);<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        cleanupHbckZnode();<a name="line.535"></a>
-<span class="sourceLineNo">536</span>        unlockHbck();<a name="line.536"></a>
-<span class="sourceLineNo">537</span>      }<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    });<a name="line.538"></a>
-<span class="sourceLineNo">539</span><a name="line.539"></a>
-<span class="sourceLineNo">540</span>    LOG.info("Launching hbck");<a name="line.540"></a>
-<span class="sourceLineNo">541</span><a name="line.541"></a>
-<span class="sourceLineNo">542</span>    connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());<a name="line.542"></a>
-<span class="sourceLineNo">543</span>    admin = connection.getAdmin();<a name="line.543"></a>
-<span class="sourceLineNo">544</span>    meta = connection.getTable(TableName.META_TABLE_NAME);<a name="line.544"></a>
-<span class="sourceLineNo">545</span>    status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS,<a name="line.545"></a>
-<span class="sourceLineNo">546</span>      Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS,<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));<a name="line.547"></a>
-<span class="sourceLineNo">548</span>  }<a name="line.548"></a>
-<span class="sourceLineNo">549</span><a name="line.549"></a>
-<span class="sourceLineNo">550</span>  /**<a name="line.550"></a>
-<span class="sourceLineNo">551</span>   * Get deployed regions according to the region servers.<a name="line.551"></a>
-<span class="sourceLineNo">552</span>   */<a name="line.552"></a>
-<span class="sourceLineNo">553</span>  private void loadDeployedRegions() throws IOException, InterruptedException {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>    // From the master, get a list of all known live region servers<a name="line.554"></a>
-<span class="sourceLineNo">555</span>    Collection&lt;ServerName&gt; regionServers = status.getLiveServerMetrics().keySet();<a name="line.555"></a>
-<span class="sourceLineNo">556</span>    errors.print("Number of live region servers: " + regionServers.size());<a name="line.556"></a>
-<span class="sourceLineNo">557</span>    if (details) {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      for (ServerName rsinfo: regionServers) {<a name="line.558"></a>
-<span class="sourceLineNo">559</span>        errors.print("  " + rsinfo.getServerName());<a name="line.559"></a>
-<span class="sourceLineNo">560</span>      }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
-<span class="sourceLineNo">562</span><a name="line.562"></a>
-<span class="sourceLineNo">563</span>    // From the master, get a list of all dead region servers<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    Collection&lt;ServerName&gt; deadRegionServers = status.getDeadServerNames();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    errors.print("Number of dead region servers: " + deadRegionServers.size());<a name="line.565"></a>
-<span class="sourceLineNo">566</span>    if (details) {<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      for (ServerName name: deadRegionServers) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        errors.print("  " + name);<a name="line.568"></a>
-<span class="sourceLineNo">569</span>      }<a name="line.569"></a>
+<span class="sourceLineNo">314</span>   * This map contains the state of all hbck items.  It maps from encoded region<a name="line.314"></a>
+<span class="sourceLineNo">315</span>   * name to HbckInfo structure.  The information contained in HbckInfo is used<a name="line.315"></a>
+<span class="sourceLineNo">316</span>   * to detect and correct consistency (hdfs/meta/deployment) problems.<a name="line.316"></a>
+<span class="sourceLineNo">317</span>   */<a name="line.317"></a>
+<span class="sourceLineNo">318</span>  private TreeMap&lt;String, HbckInfo&gt; regionInfoMap = new TreeMap&lt;&gt;();<a name="line.318"></a>
+<span class="sourceLineNo">319</span>  // Empty regioninfo qualifiers in hbase:meta<a name="line.319"></a>
+<span class="sourceLineNo">320</span>  private Set&lt;Result&gt; emptyRegionInfoQualifiers = new HashSet&lt;&gt;();<a name="line.320"></a>
+<span class="sourceLineNo">321</span><a name="line.321"></a>
+<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
+<span class="sourceLineNo">323</span>   * This map from Tablename -&gt; TableInfo contains the structures necessary to<a name="line.323"></a>
+<span class="sourceLineNo">324</span>   * detect table consistency problems (holes, dupes, overlaps).  It is sorted<a name="line.324"></a>
+<span class="sourceLineNo">325</span>   * to prevent dupes.<a name="line.325"></a>
+<span class="sourceLineNo">326</span>   *<a name="line.326"></a>
+<span class="sourceLineNo">327</span>   * If tablesIncluded is empty, this map contains all tables.<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Otherwise, it contains only meta tables and tables in tablesIncluded,<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * unless checkMetaOnly is specified, in which case, it contains only<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * the meta table<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  private SortedMap&lt;TableName, TableInfo&gt; tablesInfo = new ConcurrentSkipListMap&lt;&gt;();<a name="line.332"></a>
+<span class="sourceLineNo">333</span><a name="line.333"></a>
+<span class="sourceLineNo">334</span>  /**<a name="line.334"></a>
+<span class="sourceLineNo">335</span>   * When initially looking at HDFS, we attempt to find any orphaned data.<a name="line.335"></a>
+<span class="sourceLineNo">336</span>   */<a name="line.336"></a>
+<span class="sourceLineNo">337</span>  private List&lt;HbckInfo&gt; orphanHdfsDirs = Collections.synchronizedList(new ArrayList&lt;HbckInfo&gt;());<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; orphanTableDirs = new HashMap&lt;&gt;();<a name="line.339"></a>
+<span class="sourceLineNo">340</span>  private Map&lt;TableName, TableState&gt; tableStates = new HashMap&lt;&gt;();<a name="line.340"></a>
+<span class="sourceLineNo">341</span>  private final RetryCounterFactory lockFileRetryCounterFactory;<a name="line.341"></a>
+<span class="sourceLineNo">342</span>  private final RetryCounterFactory createZNodeRetryCounterFactory;<a name="line.342"></a>
+<span class="sourceLineNo">343</span><a name="line.343"></a>
+<span class="sourceLineNo">344</span>  private Map&lt;TableName, Set&lt;String&gt;&gt; skippedRegions = new HashMap&lt;&gt;();<a name="line.344"></a>
+<span class="sourceLineNo">345</span><a name="line.345"></a>
+<span class="sourceLineNo">346</span>  private ZKWatcher zkw = null;<a name="line.346"></a>
+<span class="sourceLineNo">347</span>  private String hbckEphemeralNodePath = null;<a name="line.347"></a>
+<span class="sourceLineNo">348</span>  private boolean hbckZodeCreated = false;<a name="line.348"></a>
+<span class="sourceLineNo">349</span><a name="line.349"></a>
+<span class="sourceLineNo">350</span>  /**<a name="line.350"></a>
+<span class="sourceLineNo">351</span>   * Constructor<a name="line.351"></a>
+<span class="sourceLineNo">352</span>   *<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @param conf Configuration object<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @throws MasterNotRunningException if the master is not running<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   * @throws ZooKeeperConnectionException if unable to connect to ZooKeeper<a name="line.355"></a>
+<span class="sourceLineNo">356</span>   */<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    this(conf, createThreadPool(conf));<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  private static ExecutorService createThreadPool(Configuration conf) {<a name="line.361"></a>
+<span class="sourceLineNo">362</span>    int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);<a name="line.362"></a>
+<span class="sourceLineNo">363</span>    return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));<a name="line.363"></a>
+<span class="sourceLineNo">364</span>  }<a name="line.364"></a>
+<span class="sourceLineNo">365</span><a name="line.365"></a>
+<span class="sourceLineNo">366</span>  /**<a name="line.366"></a>
+<span class="sourceLineNo">367</span>   * Constructor<a name="line.367"></a>
+<span class="sourceLineNo">368</span>   *<a name="line.368"></a>
+<span class="sourceLineNo">369</span>   * @param conf<a name="line.369"></a>
+<span class="sourceLineNo">370</span>   *          Configuration object<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * @throws MasterNotRunningException<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   *           if the master is not running<a name="line.372"></a>
+<span class="sourceLineNo">373</span>   * @throws ZooKeeperConnectionException<a name="line.373"></a>
+<span class="sourceLineNo">374</span>   *           if unable to connect to ZooKeeper<a name="line.374"></a>
+<span class="sourceLineNo">375</span>   */<a name="line.375"></a>
+<span class="sourceLineNo">376</span>  public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,<a name="line.376"></a>
+<span class="sourceLineNo">377</span>      ZooKeeperConnectionException, IOException, ClassNotFoundException {<a name="line.377"></a>
+<span class="sourceLineNo">378</span>    super(conf);<a name="line.378"></a>
+<span class="sourceLineNo">379</span>    errors = getErrorReporter(getConf());<a name="line.379"></a>
+<span class="sourceLineNo">380</span>    this.executor = exec;<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    lockFileRetryCounterFactory = createLockRetryCounterFactory(getConf());<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    createZNodeRetryCounterFactory = createZnodeRetryCounterFactory(getConf());<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    zkw = createZooKeeperWatcher();<a name="line.383"></a>
+<span class="sourceLineNo">384</span>  }<a name="line.384"></a>
+<span class="sourceLineNo">385</span><a name="line.385"></a>
+<span class="sourceLineNo">386</span>  /**<a name="line.386"></a>
+<span class="sourceLineNo">387</span>   * @return A retry counter factory configured for retrying lock file creation.<a name="line.387"></a>
+<span class="sourceLineNo">388</span>   */<a name="line.388"></a>
+<span class="sourceLineNo">389</span>  public static RetryCounterFactory createLockRetryCounterFactory(Configuration conf) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>    return new RetryCounterFactory(<a name="line.390"></a>
+<span class="sourceLineNo">391</span>        conf.getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),<a name="line.391"></a>
+<span class="sourceLineNo">392</span>        conf.getInt("hbase.hbck.lockfile.attempt.sleep.interval",<a name="line.392"></a>
+<span class="sourceLineNo">393</span>            DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),<a name="line.393"></a>
+<span class="sourceLineNo">394</span>        conf.getInt("hbase.hbck.lockfile.attempt.maxsleeptime",<a name="line.394"></a>
+<span class="sourceLineNo">395</span>            DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.395"></a>
+<span class="sourceLineNo">396</span>  }<a name="line.396"></a>
+<span class="sourceLineNo">397</span><a name="line.397"></a>
+<span class="sourceLineNo">398</span>  /**<a name="line.398"></a>
+<span class="sourceLineNo">399</span>   * @return A retry counter factory configured for retrying znode creation.<a name="line.399"></a>
+<span class="sourceLineNo">400</span>   */<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  private static RetryCounterFactory createZnodeRetryCounterFactory(Configuration conf) {<a name="line.401"></a>
+<span class="sourceLineNo">402</span>    return new RetryCounterFactory(<a name="line.402"></a>
+<span class="sourceLineNo">403</span>        conf.getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),<a name="line.403"></a>
+<span class="sourceLineNo">404</span>        conf.getInt("hbase.hbck.createznode.attempt.sleep.interval",<a name="line.404"></a>
+<span class="sourceLineNo">405</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),<a name="line.405"></a>
+<span class="sourceLineNo">406</span>        conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime",<a name="line.406"></a>
+<span class="sourceLineNo">407</span>            DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));<a name="line.407"></a>
+<span class="sourceLineNo">408</span>  }<a name="line.408"></a>
+<span class="sourceLineNo">409</span><a name="line.409"></a>
+<span class="sourceLineNo">410</span>  /**<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * @return Return the tmp dir this tool writes too.<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   */<a name="line.412"></a>
+<span class="sourceLineNo">413</span>  @VisibleForTesting<a name="line.413"></a>
+<span class="sourceLineNo">414</span>  public static Path getTmpDir(Configuration conf) throws IOException {<a name="line.414"></a>
+<span class="sourceLineNo">415</span>    return new Path(FSUtils.getRootDir(conf), HConstants.HBASE_TEMP_DIRECTORY);<a name="line.415"></a>
+<span class="sourceLineNo">416</span>  }<a name="line.416"></a>
+<span class="sourceLineNo">417</span><a name="line.417"></a>
+<span class="sourceLineNo">418</span>  private static class FileLockCallable implements Callable&lt;FSDataOutputStream&gt; {<a name="line.418"></a>
+<span class="sourceLineNo">419</span>    RetryCounter retryCounter;<a name="line.419"></a>
+<span class="sourceLineNo">420</span>    private final Configuration conf;<a name="line.420"></a>
+<span class="sourceLineNo">421</span>    private Path hbckLockPath = null;<a name="line.421"></a>
+<span class="sourceLineNo">422</span><a name="line.422"></a>
+<span class="sourceLineNo">423</span>    public FileLockCallable(Configuration conf, RetryCounter retryCounter) {<a name="line.423"></a>
+<span class="sourceLineNo">424</span>      this.retryCounter = retryCounter;<a name="line.424"></a>
+<span class="sourceLineNo">425</span>      this.conf = conf;<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    }<a name="line.426"></a>
+<span class="sourceLineNo">427</span><a name="line.427"></a>
+<span class="sourceLineNo">428</span>    /**<a name="line.428"></a>
+<span class="sourceLineNo">429</span>     * @return Will be &lt;code&gt;null&lt;/code&gt; unless you call {@link #call()}<a name="line.429"></a>
+<span class="sourceLineNo">430</span>     */<a name="line.430"></a>
+<span class="sourceLineNo">431</span>    Path getHbckLockPath() {<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      return this.hbckLockPath;<a name="line.432"></a>
+<span class="sourceLineNo">433</span>    }<a name="line.433"></a>
+<span class="sourceLineNo">434</span><a name="line.434"></a>
+<span class="sourceLineNo">435</span>    @Override<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    public FSDataOutputStream call() throws IOException {<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      try {<a name="line.437"></a>
+<span class="sourceLineNo">438</span>        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);<a name="line.438"></a>
+<span class="sourceLineNo">439</span>        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,<a name="line.439"></a>
+<span class="sourceLineNo">440</span>            HConstants.DATA_FILE_UMASK_KEY);<a name="line.440"></a>
+<span class="sourceLineNo">441</span>        Path tmpDir = getTmpDir(conf);<a name="line.441"></a>
+<span class="sourceLineNo">442</span>        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);<a name="line.442"></a>
+<span class="sourceLineNo">443</span>        fs.mkdirs(tmpDir);<a name="line.443"></a>
+<span class="sourceLineNo">444</span>        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);<a name="line.444"></a>
+<span class="sourceLineNo">445</span>        out.writeBytes(InetAddress.getLocalHost().toString());<a name="line.445"></a>
+<span class="sourceLineNo">446</span>        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.<a name="line.446"></a>
+<span class="sourceLineNo">447</span>        out.writeBytes(" Written by an hbase-2.x Master to block an " +<a name="line.447"></a>
+<span class="sourceLineNo">448</span>            "attempt by an hbase-1.x HBCK tool making modification to state. " +<a name="line.448"></a>
+<span class="sourceLineNo">449</span>            "See 'HBCK must match HBase server version' in the hbase refguide.");<a name="line.449"></a>
+<span class="sourceLineNo">450</span>        out.flush();<a name="line.450"></a>
+<span class="sourceLineNo">451</span>        return out;<a name="line.451"></a>
+<span class="sourceLineNo">452</span>      } catch(RemoteException e) {<a name="line.452"></a>
+<span class="sourceLineNo">453</span>        if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){<a name="line.453"></a>
+<span class="sourceLineNo">454</span>          return null;<a name="line.454"></a>
+<span class="sourceLineNo">455</span>        } else {<a name="line.455"></a>
+<span class="sourceLineNo">456</span>          throw e;<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        }<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      }<a name="line.458"></a>
+<span class="sourceLineNo">459</span>    }<a name="line.459"></a>
+<span class="sourceLineNo">460</span><a name="line.460"></a>
+<span class="sourceLineNo">461</span>    private FSDataOutputStream createFileWithRetries(final FileSystem fs,<a name="line.461"></a>
+<span class="sourceLineNo">462</span>        final Path hbckLockFilePath, final FsPermission defaultPerms)<a name="line.462"></a>
+<span class="sourceLineNo">463</span>        throws IOException {<a name="line.463"></a>
+<span class="sourceLineNo">464</span>      IOException exception = null;<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      do {<a name="line.465"></a>
+<span class="sourceLineNo">466</span>        try {<a name="line.466"></a>
+<span class="sourceLineNo">467</span>          return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);<a name="line.467"></a>
+<span class="sourceLineNo">468</span>        } catch (IOException ioe) {<a name="line.468"></a>
+<span class="sourceLineNo">469</span>          LOG.info("Failed to create lock file " + hbckLockFilePath.getName()<a name="line.469"></a>
+<span class="sourceLineNo">470</span>              + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.470"></a>
+<span class="sourceLineNo">471</span>              + retryCounter.getMaxAttempts());<a name="line.471"></a>
+<span class="sourceLineNo">472</span>          LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),<a name="line.472"></a>
+<span class="sourceLineNo">473</span>              ioe);<a name="line.473"></a>
+<span class="sourceLineNo">474</span>          try {<a name="line.474"></a>
+<span class="sourceLineNo">475</span>            exception = ioe;<a name="line.475"></a>
+<span class="sourceLineNo">476</span>            retryCounter.sleepUntilNextRetry();<a name="line.476"></a>
+<span class="sourceLineNo">477</span>          } catch (InterruptedException ie) {<a name="line.477"></a>
+<span class="sourceLineNo">478</span>            throw (InterruptedIOException) new InterruptedIOException(<a name="line.478"></a>
+<span class="sourceLineNo">479</span>                "Can't create lock file " + hbckLockFilePath.getName())<a name="line.479"></a>
+<span class="sourceLineNo">480</span>            .initCause(ie);<a name="line.480"></a>
+<span class="sourceLineNo">481</span>          }<a name="line.481"></a>
+<span class="sourceLineNo">482</span>        }<a name="line.482"></a>
+<span class="sourceLineNo">483</span>      } while (retryCounter.shouldRetry());<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>      throw exception;<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  /**<a name="line.489"></a>
+<span class="sourceLineNo">490</span>   * This method maintains a lock using a file. If the creation fails we return null<a name="line.490"></a>
+<span class="sourceLineNo">491</span>   *<a name="line.491"></a>
+<span class="sourceLineNo">492</span>   * @return FSDataOutputStream object corresponding to the newly opened lock file<a name="line.492"></a>
+<span class="sourceLineNo">493</span>   * @throws IOException if IO failure occurs<a name="line.493"></a>
+<span class="sourceLineNo">494</span>   */<a name="line.494"></a>
+<span class="sourceLineNo">495</span>  public static Pair&lt;Path, FSDataOutputStream&gt; checkAndMarkRunningHbck(Configuration conf,<a name="line.495"></a>
+<span class="sourceLineNo">496</span>      RetryCounter retryCounter) throws IOException {<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    FileLockCallable callable = new FileLockCallable(conf, retryCounter);<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    ExecutorService executor = Executors.newFixedThreadPool(1);<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    FutureTask&lt;FSDataOutputStream&gt; futureTask = new FutureTask&lt;&gt;(callable);<a name="line.499"></a>
+<span class="sourceLineNo">500</span>    executor.execute(futureTask);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    final int timeoutInSeconds = conf.getInt(<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    FSDataOutputStream stream = null;<a name="line.503"></a>
+<span class="sourceLineNo">504</span>    try {<a name="line.504"></a>
+<span class="sourceLineNo">505</span>      stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    } catch (ExecutionException ee) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      LOG.warn("Encountered exception when opening lock file", ee);<a name="line.507"></a>
+<span class="sourceLineNo">508</span>    } catch (InterruptedException ie) {<a name="line.508"></a>
+<span class="sourceLineNo">509</span>      LOG.warn("Interrupted when opening lock file", ie);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      Thread.currentThread().interrupt();<a name="line.510"></a>
+<span class="sourceLineNo">511</span>    } catch (TimeoutException exception) {<a name="line.511"></a>
+<span class="sourceLineNo">512</span>      // took too long to obtain lock<a name="line.512"></a>
+<span class="sourceLineNo">513</span>      LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      futureTask.cancel(true);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } finally {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      executor.shutdownNow();<a name="line.516"></a>
+<span class="sourceLineNo">517</span>    }<a name="line.517"></a>
+<span class="sourceLineNo">518</span>    return new Pair&lt;Path, FSDataOutputStream&gt;(callable.getHbckLockPath(), stream);<a name="line.518"></a>
+<span class="sourceLineNo">519</span>  }<a name="line.519"></a>
+<span class="sourceLineNo">520</span><a name="line.520"></a>
+<span class="sourceLineNo">521</span>  private void unlockHbck() {<a name="line.521"></a>
+<span class="sourceLineNo">522</span>    if (isExclusive() &amp;&amp; hbckLockCleanup.compareAndSet(true, false)) {<a name="line.522"></a>
+<span class="sourceLineNo">523</span>      RetryCounter retryCounter = lockFileRetryCounterFactory.create();<a name="line.523"></a>
+<span class="sourceLineNo">524</span>      do {<a name="line.524"></a>
+<span class="sourceLineNo">525</span>        try {<a name="line.525"></a>
+<span class="sourceLineNo">526</span>          IOUtils.closeQuietly(hbckOutFd);<a name="line.526"></a>
+<span class="sourceLineNo">527</span>          FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);<a name="line.527"></a>
+<span class="sourceLineNo">528</span>          LOG.info("Finishing hbck");<a name="line.528"></a>
+<span class="sourceLineNo">529</span>          return;<a name="line.529"></a>
+<span class="sourceLineNo">530</span>        } catch (IOException ioe) {<a name="line.530"></a>
+<span class="sourceLineNo">531</span>          LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="<a name="line.531"></a>
+<span class="sourceLineNo">532</span>              + (retryCounter.getAttemptTimes() + 1) + " of "<a name="line.532"></a>
+<span class="sourceLineNo">533</span>              + retryCounter.getMaxAttempts());<a name="line.533"></a>
+<span class="sourceLineNo">534</span>          LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);<a name="line.534"></a>
+<span class="sourceLineNo">535</span>          try {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>            retryCounter.sleepUntilNextRetry();<a name="line.536"></a>
+<span class="sourceLineNo">537</span>          } catch (InterruptedException ie) {<a name="line.537"></a>
+<span class="sourceLineNo">538</span>            Thread.currentThread().interrupt();<a name="line.538"></a>
+<span class="sourceLineNo">539</span>            LOG.warn("Interrupted while deleting lock file" +<a name="line.539"></a>
+<span class="sourceLineNo">540</span>                HBCK_LOCK_PATH);<a name="line.540"></a>
+<span class="sourceLineNo">541</span>            return;<a name="line.541"></a>
+<span class="sourceLineNo">542</span>          }<a name="line.542"></a>
+<span class="sourceLineNo">543</span>        }<a name="line.543"></a>
+<span class="sourceLineNo">544</span>      } while (retryCounter.shouldRetry());<a name="line.544"></a>
+<span class="sourceLineNo">545</span>    }<a name="line.545"></a>
+<span class="sourceLineNo">546</span>  }<a name="line.546"></a>
+<span class="sourceLineNo">547</span><a name="line.547"></a>
+<span class="sourceLineNo">548</span>  /**<a name="line.548"></a>
+<span class="sourceLineNo">549</span>   * To repair region consistency, one must call connect() in order to repair<a name="line.549"></a>
+<span class="sourceLineNo">550</span>   * online state.<a name="line.550"></a>
+<span class="sourceLineNo">551</span>   */<a name="line.551"></a>
+<span class="sourceLineNo">552</span>  public void connect() throws IOException {<a name="line.552"></a>
+<span class="sourceLineNo">553</span><a name="line.553"></a>
+<span class="sourceLineNo">554</span>    if (isExclusive()) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>      // Grab the

<TRUNCATED>