Posted to commits@hbase.apache.org by gi...@apache.org on 2018/05/10 14:48:08 UTC

[08/34] hbase-site git commit: Published site at 8ba2a7eeb967a80d04020e30b2fe8e8652ea1758.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/15a025c1/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index 869f6a1..3b08b86 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
@@ -341,2502 +341,2502 @@
 <span class="sourceLineNo">333</span>    // a hbase checksum verification failure will cause unit tests to fail<a name="line.333"></a>
 <span class="sourceLineNo">334</span>    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);<a name="line.334"></a>
 <span class="sourceLineNo">335</span><a name="line.335"></a>
-<span class="sourceLineNo">336</span>    // if conf is provided, prevent contention for ports if other hbase thread(s) are running<a name="line.336"></a>
-<span class="sourceLineNo">337</span>    if (conf != null) {<a name="line.337"></a>
-<span class="sourceLineNo">338</span>      if (conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT)<a name="line.338"></a>
-<span class="sourceLineNo">339</span>              == HConstants.DEFAULT_MASTER_INFOPORT) {<a name="line.339"></a>
-<span class="sourceLineNo">340</span>        conf.setInt(HConstants.MASTER_INFO_PORT, -1);<a name="line.340"></a>
-<span class="sourceLineNo">341</span>        LOG.debug("Config property {} changed to -1", HConstants.MASTER_INFO_PORT);<a name="line.341"></a>
-<span class="sourceLineNo">342</span>      }<a name="line.342"></a>
-<span class="sourceLineNo">343</span>      if (conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT)<a name="line.343"></a>
-<span class="sourceLineNo">344</span>              == HConstants.DEFAULT_REGIONSERVER_PORT) {<a name="line.344"></a>
-<span class="sourceLineNo">345</span>        conf.setInt(HConstants.REGIONSERVER_PORT, -1);<a name="line.345"></a>
-<span class="sourceLineNo">346</span>        LOG.debug("Config property {} changed to -1", HConstants.REGIONSERVER_PORT);<a name="line.346"></a>
-<span class="sourceLineNo">347</span>      }<a name="line.347"></a>
-<span class="sourceLineNo">348</span>    }<a name="line.348"></a>
-<span class="sourceLineNo">349</span><a name="line.349"></a>
-<span class="sourceLineNo">350</span>    // Save this for when setting default file:// breaks things<a name="line.350"></a>
-<span class="sourceLineNo">351</span>    this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));<a name="line.351"></a>
-<span class="sourceLineNo">352</span><a name="line.352"></a>
-<span class="sourceLineNo">353</span>    // Every cluster is a local cluster until we start DFS<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    // Note that conf could be null, but this.conf will not be<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    String dataTestDir = getDataTestDir().toString();<a name="line.355"></a>
-<span class="sourceLineNo">356</span>    this.conf.set("fs.defaultFS","file:///");<a name="line.356"></a>
-<span class="sourceLineNo">357</span>    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);<a name="line.357"></a>
-<span class="sourceLineNo">358</span>    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);<a name="line.358"></a>
-<span class="sourceLineNo">359</span>    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,false);<a name="line.359"></a>
-<span class="sourceLineNo">360</span>  }<a name="line.360"></a>
-<span class="sourceLineNo">361</span><a name="line.361"></a>
-<span class="sourceLineNo">362</span>  /**<a name="line.362"></a>
-<span class="sourceLineNo">363</span>   * @deprecated use {@link HBaseTestingUtility#HBaseTestingUtility()} instead<a name="line.363"></a>
-<span class="sourceLineNo">364</span>   * @return a normal HBaseTestingUtility<a name="line.364"></a>
-<span class="sourceLineNo">365</span>   */<a name="line.365"></a>
-<span class="sourceLineNo">366</span>  @Deprecated<a name="line.366"></a>
-<span class="sourceLineNo">367</span>  public static HBaseTestingUtility createLocalHTU() {<a name="line.367"></a>
-<span class="sourceLineNo">368</span>    return new HBaseTestingUtility();<a name="line.368"></a>
-<span class="sourceLineNo">369</span>  }<a name="line.369"></a>
-<span class="sourceLineNo">370</span><a name="line.370"></a>
-<span class="sourceLineNo">371</span>  /**<a name="line.371"></a>
-<span class="sourceLineNo">372</span>   * @deprecated use {@link HBaseTestingUtility#HBaseTestingUtility(Configuration)} instead<a name="line.372"></a>
-<span class="sourceLineNo">373</span>   * @return a normal HBaseTestingUtility<a name="line.373"></a>
-<span class="sourceLineNo">374</span>   */<a name="line.374"></a>
-<span class="sourceLineNo">375</span>  @Deprecated<a name="line.375"></a>
-<span class="sourceLineNo">376</span>  public static HBaseTestingUtility createLocalHTU(Configuration c) {<a name="line.376"></a>
-<span class="sourceLineNo">377</span>    return new HBaseTestingUtility(c);<a name="line.377"></a>
-<span class="sourceLineNo">378</span>  }<a name="line.378"></a>
-<span class="sourceLineNo">379</span><a name="line.379"></a>
-<span class="sourceLineNo">380</span>  /**<a name="line.380"></a>
-<span class="sourceLineNo">381</span>   * Close both the region {@code r} and it's underlying WAL. For use in tests.<a name="line.381"></a>
-<span class="sourceLineNo">382</span>   */<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  public static void closeRegionAndWAL(final Region r) throws IOException {<a name="line.383"></a>
-<span class="sourceLineNo">384</span>    closeRegionAndWAL((HRegion)r);<a name="line.384"></a>
+<span class="sourceLineNo">336</span>    // Save this for when setting default file:// breaks things<a name="line.336"></a>
+<span class="sourceLineNo">337</span>    this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));<a name="line.337"></a>
+<span class="sourceLineNo">338</span><a name="line.338"></a>
+<span class="sourceLineNo">339</span>    // Every cluster is a local cluster until we start DFS<a name="line.339"></a>
+<span class="sourceLineNo">340</span>    // Note that conf could be null, but this.conf will not be<a name="line.340"></a>
+<span class="sourceLineNo">341</span>    String dataTestDir = getDataTestDir().toString();<a name="line.341"></a>
+<span class="sourceLineNo">342</span>    this.conf.set("fs.defaultFS","file:///");<a name="line.342"></a>
+<span class="sourceLineNo">343</span>    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);<a name="line.343"></a>
+<span class="sourceLineNo">344</span>    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);<a name="line.344"></a>
+<span class="sourceLineNo">345</span>    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE,false);<a name="line.345"></a>
+<span class="sourceLineNo">346</span>    // If the value for random ports isn't set set it to true, thus making<a name="line.346"></a>
+<span class="sourceLineNo">347</span>    // tests opt-out for random port assignment<a name="line.347"></a>
+<span class="sourceLineNo">348</span>    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,<a name="line.348"></a>
+<span class="sourceLineNo">349</span>        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));<a name="line.349"></a>
+<span class="sourceLineNo">350</span>  }<a name="line.350"></a>
+<span class="sourceLineNo">351</span><a name="line.351"></a>
+<span class="sourceLineNo">352</span>  /**<a name="line.352"></a>
+<span class="sourceLineNo">353</span>   * @deprecated use {@link HBaseTestingUtility#HBaseTestingUtility()} instead<a name="line.353"></a>
+<span class="sourceLineNo">354</span>   * @return a normal HBaseTestingUtility<a name="line.354"></a>
+<span class="sourceLineNo">355</span>   */<a name="line.355"></a>
+<span class="sourceLineNo">356</span>  @Deprecated<a name="line.356"></a>
+<span class="sourceLineNo">357</span>  public static HBaseTestingUtility createLocalHTU() {<a name="line.357"></a>
+<span class="sourceLineNo">358</span>    return new HBaseTestingUtility();<a name="line.358"></a>
+<span class="sourceLineNo">359</span>  }<a name="line.359"></a>
+<span class="sourceLineNo">360</span><a name="line.360"></a>
+<span class="sourceLineNo">361</span>  /**<a name="line.361"></a>
+<span class="sourceLineNo">362</span>   * @deprecated use {@link HBaseTestingUtility#HBaseTestingUtility(Configuration)} instead<a name="line.362"></a>
+<span class="sourceLineNo">363</span>   * @return a normal HBaseTestingUtility<a name="line.363"></a>
+<span class="sourceLineNo">364</span>   */<a name="line.364"></a>
+<span class="sourceLineNo">365</span>  @Deprecated<a name="line.365"></a>
+<span class="sourceLineNo">366</span>  public static HBaseTestingUtility createLocalHTU(Configuration c) {<a name="line.366"></a>
+<span class="sourceLineNo">367</span>    return new HBaseTestingUtility(c);<a name="line.367"></a>
+<span class="sourceLineNo">368</span>  }<a name="line.368"></a>
+<span class="sourceLineNo">369</span><a name="line.369"></a>
+<span class="sourceLineNo">370</span>  /**<a name="line.370"></a>
+<span class="sourceLineNo">371</span>   * Close both the region {@code r} and it's underlying WAL. For use in tests.<a name="line.371"></a>
+<span class="sourceLineNo">372</span>   */<a name="line.372"></a>
+<span class="sourceLineNo">373</span>  public static void closeRegionAndWAL(final Region r) throws IOException {<a name="line.373"></a>
+<span class="sourceLineNo">374</span>    closeRegionAndWAL((HRegion)r);<a name="line.374"></a>
+<span class="sourceLineNo">375</span>  }<a name="line.375"></a>
+<span class="sourceLineNo">376</span><a name="line.376"></a>
+<span class="sourceLineNo">377</span>  /**<a name="line.377"></a>
+<span class="sourceLineNo">378</span>   * Close both the HRegion {@code r} and it's underlying WAL. For use in tests.<a name="line.378"></a>
+<span class="sourceLineNo">379</span>   */<a name="line.379"></a>
+<span class="sourceLineNo">380</span>  public static void closeRegionAndWAL(final HRegion r) throws IOException {<a name="line.380"></a>
+<span class="sourceLineNo">381</span>    if (r == null) return;<a name="line.381"></a>
+<span class="sourceLineNo">382</span>    r.close();<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    if (r.getWAL() == null) return;<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    r.getWAL().close();<a name="line.384"></a>
 <span class="sourceLineNo">385</span>  }<a name="line.385"></a>
 <span class="sourceLineNo">386</span><a name="line.386"></a>
 <span class="sourceLineNo">387</span>  /**<a name="line.387"></a>
-<span class="sourceLineNo">388</span>   * Close both the HRegion {@code r} and it's underlying WAL. For use in tests.<a name="line.388"></a>
-<span class="sourceLineNo">389</span>   */<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  public static void closeRegionAndWAL(final HRegion r) throws IOException {<a name="line.390"></a>
-<span class="sourceLineNo">391</span>    if (r == null) return;<a name="line.391"></a>
-<span class="sourceLineNo">392</span>    r.close();<a name="line.392"></a>
-<span class="sourceLineNo">393</span>    if (r.getWAL() == null) return;<a name="line.393"></a>
-<span class="sourceLineNo">394</span>    r.getWAL().close();<a name="line.394"></a>
-<span class="sourceLineNo">395</span>  }<a name="line.395"></a>
-<span class="sourceLineNo">396</span><a name="line.396"></a>
-<span class="sourceLineNo">397</span>  /**<a name="line.397"></a>
-<span class="sourceLineNo">398</span>   * Returns this classes's instance of {@link Configuration}.  Be careful how<a name="line.398"></a>
-<span class="sourceLineNo">399</span>   * you use the returned Configuration since {@link Connection} instances<a name="line.399"></a>
-<span class="sourceLineNo">400</span>   * can be shared.  The Map of Connections is keyed by the Configuration.  If<a name="line.400"></a>
-<span class="sourceLineNo">401</span>   * say, a Connection was being used against a cluster that had been shutdown,<a name="line.401"></a>
-<span class="sourceLineNo">402</span>   * see {@link #shutdownMiniCluster()}, then the Connection will no longer<a name="line.402"></a>
-<span class="sourceLineNo">403</span>   * be wholesome.  Rather than use the return direct, its usually best to<a name="line.403"></a>
-<span class="sourceLineNo">404</span>   * make a copy and use that.  Do<a name="line.404"></a>
-<span class="sourceLineNo">405</span>   * &lt;code&gt;Configuration c = new Configuration(INSTANCE.getConfiguration());&lt;/code&gt;<a name="line.405"></a>
-<span class="sourceLineNo">406</span>   * @return Instance of Configuration.<a name="line.406"></a>
-<span class="sourceLineNo">407</span>   */<a name="line.407"></a>
-<span class="sourceLineNo">408</span>  @Override<a name="line.408"></a>
-<span class="sourceLineNo">409</span>  public Configuration getConfiguration() {<a name="line.409"></a>
-<span class="sourceLineNo">410</span>    return super.getConfiguration();<a name="line.410"></a>
-<span class="sourceLineNo">411</span>  }<a name="line.411"></a>
-<span class="sourceLineNo">412</span><a name="line.412"></a>
-<span class="sourceLineNo">413</span>  public void setHBaseCluster(HBaseCluster hbaseCluster) {<a name="line.413"></a>
-<span class="sourceLineNo">414</span>    this.hbaseCluster = hbaseCluster;<a name="line.414"></a>
-<span class="sourceLineNo">415</span>  }<a name="line.415"></a>
-<span class="sourceLineNo">416</span><a name="line.416"></a>
-<span class="sourceLineNo">417</span>  /**<a name="line.417"></a>
-<span class="sourceLineNo">418</span>   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.<a name="line.418"></a>
-<span class="sourceLineNo">419</span>   * Give it a random name so can have many concurrent tests running if<a name="line.419"></a>
-<span class="sourceLineNo">420</span>   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}<a name="line.420"></a>
-<span class="sourceLineNo">421</span>   * System property, as it's what minidfscluster bases<a name="line.421"></a>
-<span class="sourceLineNo">422</span>   * it data dir on.  Moding a System property is not the way to do concurrent<a name="line.422"></a>
-<span class="sourceLineNo">423</span>   * instances -- another instance could grab the temporary<a name="line.423"></a>
-<span class="sourceLineNo">424</span>   * value unintentionally -- but not anything can do about it at moment;<a name="line.424"></a>
-<span class="sourceLineNo">425</span>   * single instance only is how the minidfscluster works.<a name="line.425"></a>
-<span class="sourceLineNo">426</span>   *<a name="line.426"></a>
-<span class="sourceLineNo">427</span>   * We also create the underlying directory for<a name="line.427"></a>
-<span class="sourceLineNo">428</span>   *  hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values<a name="line.428"></a>
-<span class="sourceLineNo">429</span>   *  in the conf, and as a system property for hadoop.tmp.dir<a name="line.429"></a>
-<span class="sourceLineNo">430</span>   *<a name="line.430"></a>
-<span class="sourceLineNo">431</span>   * @return The calculated data test build directory, if newly-created.<a name="line.431"></a>
-<span class="sourceLineNo">432</span>   */<a name="line.432"></a>
-<span class="sourceLineNo">433</span>  @Override<a name="line.433"></a>
-<span class="sourceLineNo">434</span>  protected Path setupDataTestDir() {<a name="line.434"></a>
-<span class="sourceLineNo">435</span>    Path testPath = super.setupDataTestDir();<a name="line.435"></a>
-<span class="sourceLineNo">436</span>    if (null == testPath) {<a name="line.436"></a>
-<span class="sourceLineNo">437</span>      return null;<a name="line.437"></a>
-<span class="sourceLineNo">438</span>    }<a name="line.438"></a>
+<span class="sourceLineNo">388</span>   * Returns this classes's instance of {@link Configuration}.  Be careful how<a name="line.388"></a>
+<span class="sourceLineNo">389</span>   * you use the returned Configuration since {@link Connection} instances<a name="line.389"></a>
+<span class="sourceLineNo">390</span>   * can be shared.  The Map of Connections is keyed by the Configuration.  If<a name="line.390"></a>
+<span class="sourceLineNo">391</span>   * say, a Connection was being used against a cluster that had been shutdown,<a name="line.391"></a>
+<span class="sourceLineNo">392</span>   * see {@link #shutdownMiniCluster()}, then the Connection will no longer<a name="line.392"></a>
+<span class="sourceLineNo">393</span>   * be wholesome.  Rather than use the return direct, its usually best to<a name="line.393"></a>
+<span class="sourceLineNo">394</span>   * make a copy and use that.  Do<a name="line.394"></a>
+<span class="sourceLineNo">395</span>   * &lt;code&gt;Configuration c = new Configuration(INSTANCE.getConfiguration());&lt;/code&gt;<a name="line.395"></a>
+<span class="sourceLineNo">396</span>   * @return Instance of Configuration.<a name="line.396"></a>
+<span class="sourceLineNo">397</span>   */<a name="line.397"></a>
+<span class="sourceLineNo">398</span>  @Override<a name="line.398"></a>
+<span class="sourceLineNo">399</span>  public Configuration getConfiguration() {<a name="line.399"></a>
+<span class="sourceLineNo">400</span>    return super.getConfiguration();<a name="line.400"></a>
+<span class="sourceLineNo">401</span>  }<a name="line.401"></a>
+<span class="sourceLineNo">402</span><a name="line.402"></a>
+<span class="sourceLineNo">403</span>  public void setHBaseCluster(HBaseCluster hbaseCluster) {<a name="line.403"></a>
+<span class="sourceLineNo">404</span>    this.hbaseCluster = hbaseCluster;<a name="line.404"></a>
+<span class="sourceLineNo">405</span>  }<a name="line.405"></a>
+<span class="sourceLineNo">406</span><a name="line.406"></a>
+<span class="sourceLineNo">407</span>  /**<a name="line.407"></a>
+<span class="sourceLineNo">408</span>   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.<a name="line.408"></a>
+<span class="sourceLineNo">409</span>   * Give it a random name so can have many concurrent tests running if<a name="line.409"></a>
+<span class="sourceLineNo">410</span>   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}<a name="line.410"></a>
+<span class="sourceLineNo">411</span>   * System property, as it's what minidfscluster bases<a name="line.411"></a>
+<span class="sourceLineNo">412</span>   * it data dir on.  Moding a System property is not the way to do concurrent<a name="line.412"></a>
+<span class="sourceLineNo">413</span>   * instances -- another instance could grab the temporary<a name="line.413"></a>
+<span class="sourceLineNo">414</span>   * value unintentionally -- but not anything can do about it at moment;<a name="line.414"></a>
+<span class="sourceLineNo">415</span>   * single instance only is how the minidfscluster works.<a name="line.415"></a>
+<span class="sourceLineNo">416</span>   *<a name="line.416"></a>
+<span class="sourceLineNo">417</span>   * We also create the underlying directory for<a name="line.417"></a>
+<span class="sourceLineNo">418</span>   *  hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values<a name="line.418"></a>
+<span class="sourceLineNo">419</span>   *  in the conf, and as a system property for hadoop.tmp.dir<a name="line.419"></a>
+<span class="sourceLineNo">420</span>   *<a name="line.420"></a>
+<span class="sourceLineNo">421</span>   * @return The calculated data test build directory, if newly-created.<a name="line.421"></a>
+<span class="sourceLineNo">422</span>   */<a name="line.422"></a>
+<span class="sourceLineNo">423</span>  @Override<a name="line.423"></a>
+<span class="sourceLineNo">424</span>  protected Path setupDataTestDir() {<a name="line.424"></a>
+<span class="sourceLineNo">425</span>    Path testPath = super.setupDataTestDir();<a name="line.425"></a>
+<span class="sourceLineNo">426</span>    if (null == testPath) {<a name="line.426"></a>
+<span class="sourceLineNo">427</span>      return null;<a name="line.427"></a>
+<span class="sourceLineNo">428</span>    }<a name="line.428"></a>
+<span class="sourceLineNo">429</span><a name="line.429"></a>
+<span class="sourceLineNo">430</span>    createSubDirAndSystemProperty(<a name="line.430"></a>
+<span class="sourceLineNo">431</span>      "hadoop.log.dir",<a name="line.431"></a>
+<span class="sourceLineNo">432</span>      testPath, "hadoop-log-dir");<a name="line.432"></a>
+<span class="sourceLineNo">433</span><a name="line.433"></a>
+<span class="sourceLineNo">434</span>    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but<a name="line.434"></a>
+<span class="sourceLineNo">435</span>    //  we want our own value to ensure uniqueness on the same machine<a name="line.435"></a>
+<span class="sourceLineNo">436</span>    createSubDirAndSystemProperty(<a name="line.436"></a>
+<span class="sourceLineNo">437</span>      "hadoop.tmp.dir",<a name="line.437"></a>
+<span class="sourceLineNo">438</span>      testPath, "hadoop-tmp-dir");<a name="line.438"></a>
 <span class="sourceLineNo">439</span><a name="line.439"></a>
-<span class="sourceLineNo">440</span>    createSubDirAndSystemProperty(<a name="line.440"></a>
-<span class="sourceLineNo">441</span>      "hadoop.log.dir",<a name="line.441"></a>
-<span class="sourceLineNo">442</span>      testPath, "hadoop-log-dir");<a name="line.442"></a>
-<span class="sourceLineNo">443</span><a name="line.443"></a>
-<span class="sourceLineNo">444</span>    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but<a name="line.444"></a>
-<span class="sourceLineNo">445</span>    //  we want our own value to ensure uniqueness on the same machine<a name="line.445"></a>
-<span class="sourceLineNo">446</span>    createSubDirAndSystemProperty(<a name="line.446"></a>
-<span class="sourceLineNo">447</span>      "hadoop.tmp.dir",<a name="line.447"></a>
-<span class="sourceLineNo">448</span>      testPath, "hadoop-tmp-dir");<a name="line.448"></a>
-<span class="sourceLineNo">449</span><a name="line.449"></a>
-<span class="sourceLineNo">450</span>    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster<a name="line.450"></a>
-<span class="sourceLineNo">451</span>    createSubDir(<a name="line.451"></a>
-<span class="sourceLineNo">452</span>      "mapreduce.cluster.local.dir",<a name="line.452"></a>
-<span class="sourceLineNo">453</span>      testPath, "mapred-local-dir");<a name="line.453"></a>
-<span class="sourceLineNo">454</span><a name="line.454"></a>
-<span class="sourceLineNo">455</span>    return testPath;<a name="line.455"></a>
-<span class="sourceLineNo">456</span>  }<a name="line.456"></a>
-<span class="sourceLineNo">457</span><a name="line.457"></a>
-<span class="sourceLineNo">458</span>  private void createSubDirAndSystemProperty(<a name="line.458"></a>
-<span class="sourceLineNo">459</span>    String propertyName, Path parent, String subDirName){<a name="line.459"></a>
-<span class="sourceLineNo">460</span><a name="line.460"></a>
-<span class="sourceLineNo">461</span>    String sysValue = System.getProperty(propertyName);<a name="line.461"></a>
-<span class="sourceLineNo">462</span><a name="line.462"></a>
-<span class="sourceLineNo">463</span>    if (sysValue != null) {<a name="line.463"></a>
-<span class="sourceLineNo">464</span>      // There is already a value set. So we do nothing but hope<a name="line.464"></a>
-<span class="sourceLineNo">465</span>      //  that there will be no conflicts<a name="line.465"></a>
-<span class="sourceLineNo">466</span>      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+<a name="line.466"></a>
-<span class="sourceLineNo">467</span>        sysValue + " so I do NOT create it in " + parent);<a name="line.467"></a>
-<span class="sourceLineNo">468</span>      String confValue = conf.get(propertyName);<a name="line.468"></a>
-<span class="sourceLineNo">469</span>      if (confValue != null &amp;&amp; !confValue.endsWith(sysValue)){<a name="line.469"></a>
-<span class="sourceLineNo">470</span>       LOG.warn(<a name="line.470"></a>
-<span class="sourceLineNo">471</span>         propertyName + " property value differs in configuration and system: "+<a name="line.471"></a>
-<span class="sourceLineNo">472</span>         "Configuration="+confValue+" while System="+sysValue+<a name="line.472"></a>
-<span class="sourceLineNo">473</span>         " Erasing configuration value by system value."<a name="line.473"></a>
-<span class="sourceLineNo">474</span>       );<a name="line.474"></a>
-<span class="sourceLineNo">475</span>      }<a name="line.475"></a>
-<span class="sourceLineNo">476</span>      conf.set(propertyName, sysValue);<a name="line.476"></a>
-<span class="sourceLineNo">477</span>    } else {<a name="line.477"></a>
-<span class="sourceLineNo">478</span>      // Ok, it's not set, so we create it as a subdirectory<a name="line.478"></a>
-<span class="sourceLineNo">479</span>      createSubDir(propertyName, parent, subDirName);<a name="line.479"></a>
-<span class="sourceLineNo">480</span>      System.setProperty(propertyName, conf.get(propertyName));<a name="line.480"></a>
-<span class="sourceLineNo">481</span>    }<a name="line.481"></a>
-<span class="sourceLineNo">482</span>  }<a name="line.482"></a>
-<span class="sourceLineNo">483</span><a name="line.483"></a>
-<span class="sourceLineNo">484</span>  /**<a name="line.484"></a>
-<span class="sourceLineNo">485</span>   * @return Where to write test data on the test filesystem; Returns working directory<a name="line.485"></a>
-<span class="sourceLineNo">486</span>   * for the test filesystem by default<a name="line.486"></a>
-<span class="sourceLineNo">487</span>   * @see #setupDataTestDirOnTestFS()<a name="line.487"></a>
-<span class="sourceLineNo">488</span>   * @see #getTestFileSystem()<a name="line.488"></a>
+<span class="sourceLineNo">440</span>    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster<a name="line.440"></a>
+<span class="sourceLineNo">441</span>    createSubDir(<a name="line.441"></a>
+<span class="sourceLineNo">442</span>      "mapreduce.cluster.local.dir",<a name="line.442"></a>
+<span class="sourceLineNo">443</span>      testPath, "mapred-local-dir");<a name="line.443"></a>
+<span class="sourceLineNo">444</span><a name="line.444"></a>
+<span class="sourceLineNo">445</span>    return testPath;<a name="line.445"></a>
+<span class="sourceLineNo">446</span>  }<a name="line.446"></a>
+<span class="sourceLineNo">447</span><a name="line.447"></a>
+<span class="sourceLineNo">448</span>  private void createSubDirAndSystemProperty(<a name="line.448"></a>
+<span class="sourceLineNo">449</span>    String propertyName, Path parent, String subDirName){<a name="line.449"></a>
+<span class="sourceLineNo">450</span><a name="line.450"></a>
+<span class="sourceLineNo">451</span>    String sysValue = System.getProperty(propertyName);<a name="line.451"></a>
+<span class="sourceLineNo">452</span><a name="line.452"></a>
+<span class="sourceLineNo">453</span>    if (sysValue != null) {<a name="line.453"></a>
+<span class="sourceLineNo">454</span>      // There is already a value set. So we do nothing but hope<a name="line.454"></a>
+<span class="sourceLineNo">455</span>      //  that there will be no conflicts<a name="line.455"></a>
+<span class="sourceLineNo">456</span>      LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+<a name="line.456"></a>
+<span class="sourceLineNo">457</span>        sysValue + " so I do NOT create it in " + parent);<a name="line.457"></a>
+<span class="sourceLineNo">458</span>      String confValue = conf.get(propertyName);<a name="line.458"></a>
+<span class="sourceLineNo">459</span>      if (confValue != null &amp;&amp; !confValue.endsWith(sysValue)){<a name="line.459"></a>
+<span class="sourceLineNo">460</span>       LOG.warn(<a name="line.460"></a>
+<span class="sourceLineNo">461</span>         propertyName + " property value differs in configuration and system: "+<a name="line.461"></a>
+<span class="sourceLineNo">462</span>         "Configuration="+confValue+" while System="+sysValue+<a name="line.462"></a>
+<span class="sourceLineNo">463</span>         " Erasing configuration value by system value."<a name="line.463"></a>
+<span class="sourceLineNo">464</span>       );<a name="line.464"></a>
+<span class="sourceLineNo">465</span>      }<a name="line.465"></a>
+<span class="sourceLineNo">466</span>      conf.set(propertyName, sysValue);<a name="line.466"></a>
+<span class="sourceLineNo">467</span>    } else {<a name="line.467"></a>
+<span class="sourceLineNo">468</span>      // Ok, it's not set, so we create it as a subdirectory<a name="line.468"></a>
+<span class="sourceLineNo">469</span>      createSubDir(propertyName, parent, subDirName);<a name="line.469"></a>
+<span class="sourceLineNo">470</span>      System.setProperty(propertyName, conf.get(propertyName));<a name="line.470"></a>
+<span class="sourceLineNo">471</span>    }<a name="line.471"></a>
+<span class="sourceLineNo">472</span>  }<a name="line.472"></a>
+<span class="sourceLineNo">473</span><a name="line.473"></a>
+<span class="sourceLineNo">474</span>  /**<a name="line.474"></a>
+<span class="sourceLineNo">475</span>   * @return Where to write test data on the test filesystem; Returns working directory<a name="line.475"></a>
+<span class="sourceLineNo">476</span>   * for the test filesystem by default<a name="line.476"></a>
+<span class="sourceLineNo">477</span>   * @see #setupDataTestDirOnTestFS()<a name="line.477"></a>
+<span class="sourceLineNo">478</span>   * @see #getTestFileSystem()<a name="line.478"></a>
+<span class="sourceLineNo">479</span>   */<a name="line.479"></a>
+<span class="sourceLineNo">480</span>  private Path getBaseTestDirOnTestFS() throws IOException {<a name="line.480"></a>
+<span class="sourceLineNo">481</span>    FileSystem fs = getTestFileSystem();<a name="line.481"></a>
+<span class="sourceLineNo">482</span>    return new Path(fs.getWorkingDirectory(), "test-data");<a name="line.482"></a>
+<span class="sourceLineNo">483</span>  }<a name="line.483"></a>
+<span class="sourceLineNo">484</span><a name="line.484"></a>
+<span class="sourceLineNo">485</span>  /**<a name="line.485"></a>
+<span class="sourceLineNo">486</span>   * @return META table descriptor<a name="line.486"></a>
+<span class="sourceLineNo">487</span>   * @deprecated since 2.0 version and will be removed in 3.0 version.<a name="line.487"></a>
+<span class="sourceLineNo">488</span>   *             use {@link #getMetaTableDescriptorBuilder()}<a name="line.488"></a>
 <span class="sourceLineNo">489</span>   */<a name="line.489"></a>
-<span class="sourceLineNo">490</span>  private Path getBaseTestDirOnTestFS() throws IOException {<a name="line.490"></a>
-<span class="sourceLineNo">491</span>    FileSystem fs = getTestFileSystem();<a name="line.491"></a>
-<span class="sourceLineNo">492</span>    return new Path(fs.getWorkingDirectory(), "test-data");<a name="line.492"></a>
+<span class="sourceLineNo">490</span>  @Deprecated<a name="line.490"></a>
+<span class="sourceLineNo">491</span>  public HTableDescriptor getMetaTableDescriptor() {<a name="line.491"></a>
+<span class="sourceLineNo">492</span>    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());<a name="line.492"></a>
 <span class="sourceLineNo">493</span>  }<a name="line.493"></a>
 <span class="sourceLineNo">494</span><a name="line.494"></a>
 <span class="sourceLineNo">495</span>  /**<a name="line.495"></a>
 <span class="sourceLineNo">496</span>   * @return META table descriptor<a name="line.496"></a>
-<span class="sourceLineNo">497</span>   * @deprecated since 2.0 version and will be removed in 3.0 version.<a name="line.497"></a>
-<span class="sourceLineNo">498</span>   *             use {@link #getMetaTableDescriptorBuilder()}<a name="line.498"></a>
-<span class="sourceLineNo">499</span>   */<a name="line.499"></a>
-<span class="sourceLineNo">500</span>  @Deprecated<a name="line.500"></a>
-<span class="sourceLineNo">501</span>  public HTableDescriptor getMetaTableDescriptor() {<a name="line.501"></a>
-<span class="sourceLineNo">502</span>    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());<a name="line.502"></a>
-<span class="sourceLineNo">503</span>  }<a name="line.503"></a>
-<span class="sourceLineNo">504</span><a name="line.504"></a>
-<span class="sourceLineNo">505</span>  /**<a name="line.505"></a>
-<span class="sourceLineNo">506</span>   * @return META table descriptor<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   */<a name="line.507"></a>
-<span class="sourceLineNo">508</span>  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {<a name="line.508"></a>
-<span class="sourceLineNo">509</span>    try {<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);<a name="line.510"></a>
-<span class="sourceLineNo">511</span>    } catch (IOException e) {<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      throw new RuntimeException("Unable to create META table descriptor", e);<a name="line.512"></a>
-<span class="sourceLineNo">513</span>    }<a name="line.513"></a>
-<span class="sourceLineNo">514</span>  }<a name="line.514"></a>
-<span class="sourceLineNo">515</span><a name="line.515"></a>
-<span class="sourceLineNo">516</span>  /**<a name="line.516"></a>
-<span class="sourceLineNo">517</span>   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}<a name="line.517"></a>
-<span class="sourceLineNo">518</span>   * to write temporary test data. Call this method after setting up the mini dfs cluster<a name="line.518"></a>
-<span class="sourceLineNo">519</span>   * if the test relies on it.<a name="line.519"></a>
-<span class="sourceLineNo">520</span>   * @return a unique path in the test filesystem<a name="line.520"></a>
-<span class="sourceLineNo">521</span>   */<a name="line.521"></a>
-<span class="sourceLineNo">522</span>  public Path getDataTestDirOnTestFS() throws IOException {<a name="line.522"></a>
-<span class="sourceLineNo">523</span>    if (dataTestDirOnTestFS == null) {<a name="line.523"></a>
-<span class="sourceLineNo">524</span>      setupDataTestDirOnTestFS();<a name="line.524"></a>
-<span class="sourceLineNo">525</span>    }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span>    return dataTestDirOnTestFS;<a name="line.527"></a>
-<span class="sourceLineNo">528</span>  }<a name="line.528"></a>
-<span class="sourceLineNo">529</span><a name="line.529"></a>
-<span class="sourceLineNo">530</span>  /**<a name="line.530"></a>
-<span class="sourceLineNo">531</span>   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}<a name="line.531"></a>
-<span class="sourceLineNo">532</span>   * to write temporary test data. Call this method after setting up the mini dfs cluster<a name="line.532"></a>
-<span class="sourceLineNo">533</span>   * if the test relies on it.<a name="line.533"></a>
-<span class="sourceLineNo">534</span>   * @return a unique path in the test filesystem<a name="line.534"></a>
-<span class="sourceLineNo">535</span>   * @param subdirName name of the subdir to create under the base test dir<a name="line.535"></a>
-<span class="sourceLineNo">536</span>   */<a name="line.536"></a>
-<span class="sourceLineNo">537</span>  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {<a name="line.537"></a>
-<span class="sourceLineNo">538</span>    return new Path(getDataTestDirOnTestFS(), subdirName);<a name="line.538"></a>
-<span class="sourceLineNo">539</span>  }<a name="line.539"></a>
-<span class="sourceLineNo">540</span><a name="line.540"></a>
-<span class="sourceLineNo">541</span>  /**<a name="line.541"></a>
-<span class="sourceLineNo">542</span>   * Sets up a path in test filesystem to be used by tests.<a name="line.542"></a>
-<span class="sourceLineNo">543</span>   * Creates a new directory if not already setup.<a name="line.543"></a>
-<span class="sourceLineNo">544</span>   */<a name="line.544"></a>
-<span class="sourceLineNo">545</span>  private void setupDataTestDirOnTestFS() throws IOException {<a name="line.545"></a>
-<span class="sourceLineNo">546</span>    if (dataTestDirOnTestFS != null) {<a name="line.546"></a>
-<span class="sourceLineNo">547</span>      LOG.warn("Data test on test fs dir already setup in "<a name="line.547"></a>
-<span class="sourceLineNo">548</span>          + dataTestDirOnTestFS.toString());<a name="line.548"></a>
-<span class="sourceLineNo">549</span>      return;<a name="line.549"></a>
-<span class="sourceLineNo">550</span>    }<a name="line.550"></a>
-<span class="sourceLineNo">551</span>    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();<a name="line.551"></a>
-<span class="sourceLineNo">552</span>  }<a name="line.552"></a>
-<span class="sourceLineNo">553</span><a name="line.553"></a>
-<span class="sourceLineNo">554</span>  /**<a name="line.554"></a>
-<span class="sourceLineNo">555</span>   * Sets up a new path in test filesystem to be used by tests.<a name="line.555"></a>
-<span class="sourceLineNo">556</span>   */<a name="line.556"></a>
-<span class="sourceLineNo">557</span>  private Path getNewDataTestDirOnTestFS() throws IOException {<a name="line.557"></a>
-<span class="sourceLineNo">558</span>    //The file system can be either local, mini dfs, or if the configuration<a name="line.558"></a>
-<span class="sourceLineNo">559</span>    //is supplied externally, it can be an external cluster FS. If it is a local<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    //file system, the tests should use getBaseTestDir, otherwise, we can use<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    //the working directory, and create a unique sub dir there<a name="line.561"></a>
-<span class="sourceLineNo">562</span>    FileSystem fs = getTestFileSystem();<a name="line.562"></a>
-<span class="sourceLineNo">563</span>    Path newDataTestDir;<a name="line.563"></a>
-<span class="sourceLineNo">564</span>    String randomStr = UUID.randomUUID().toString();<a name="line.564"></a>
-<span class="sourceLineNo">565</span>    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {<a name="line.565"></a>
-<span class="sourceLineNo">566</span>      newDataTestDir = new Path(getDataTestDir(), randomStr);<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      File dataTestDir = new File(newDataTestDir.toString());<a name="line.567"></a>
-<span class="sourceLineNo">568</span>      if (deleteOnExit()) dataTestDir.deleteOnExit();<a name="line.568"></a>
-<span class="sourceLineNo">569</span>    } else {<a name="line.569"></a>
-<span class="sourceLineNo">570</span>      Path base = getBaseTestDirOnTestFS();<a name="line.570"></a>
-<span class="sourceLineNo">571</span>      newDataTestDir = new Path(base, randomStr);<a name="line.571"></a>
-<span class="sourceLineNo">572</span>      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);<a name="line.572"></a>
-<span class="sourceLineNo">573</span>    }<a name="line.573"></a>
-<span class="sourceLineNo">574</span>    return newDataTestDir;<a name="line.574"></a>
-<span class="sourceLineNo">575</span>  }<a name="line.575"></a>
-<span class="sourceLineNo">576</span><a name="line.576"></a>
-<span class="sourceLineNo">577</span>  /**<a name="line.577"></a>
-<span class="sourceLineNo">578</span>   * Cleans the test data directory on the test filesystem.<a name="line.578"></a>
-<span class="sourceLineNo">579</span>   * @return True if we removed the test dirs<a name="line.579"></a>
-<span class="sourceLineNo">580</span>   * @throws IOException<a name="line.580"></a>
-<span class="sourceLineNo">581</span>   */<a name="line.581"></a>
-<span class="sourceLineNo">582</span>  public boolean cleanupDataTestDirOnTestFS() throws IOException {<a name="line.582"></a>
-<span class="sourceLineNo">583</span>    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);<a name="line.583"></a>
-<span class="sourceLineNo">584</span>    if (ret)<a name="line.584"></a>
-<span class="sourceLineNo">585</span>      dataTestDirOnTestFS = null;<a name="line.585"></a>
-<span class="sourceLineNo">586</span>    return ret;<a name="line.586"></a>
+<span class="sourceLineNo">497</span>   */<a name="line.497"></a>
+<span class="sourceLineNo">498</span>  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {<a name="line.498"></a>
+<span class="sourceLineNo">499</span>    try {<a name="line.499"></a>
+<span class="sourceLineNo">500</span>      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);<a name="line.500"></a>
+<span class="sourceLineNo">501</span>    } catch (IOException e) {<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      throw new RuntimeException("Unable to create META table descriptor", e);<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    }<a name="line.503"></a>
+<span class="sourceLineNo">504</span>  }<a name="line.504"></a>
+<span class="sourceLineNo">505</span><a name="line.505"></a>
+<span class="sourceLineNo">506</span>  /**<a name="line.506"></a>
+<span class="sourceLineNo">507</span>   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}<a name="line.507"></a>
+<span class="sourceLineNo">508</span>   * to write temporary test data. Call this method after setting up the mini dfs cluster<a name="line.508"></a>
+<span class="sourceLineNo">509</span>   * if the test relies on it.<a name="line.509"></a>
+<span class="sourceLineNo">510</span>   * @return a unique path in the test filesystem<a name="line.510"></a>
+<span class="sourceLineNo">511</span>   */<a name="line.511"></a>
+<span class="sourceLineNo">512</span>  public Path getDataTestDirOnTestFS() throws IOException {<a name="line.512"></a>
+<span class="sourceLineNo">513</span>    if (dataTestDirOnTestFS == null) {<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      setupDataTestDirOnTestFS();<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    }<a name="line.515"></a>
+<span class="sourceLineNo">516</span><a name="line.516"></a>
+<span class="sourceLineNo">517</span>    return dataTestDirOnTestFS;<a name="line.517"></a>
+<span class="sourceLineNo">518</span>  }<a name="line.518"></a>
+<span class="sourceLineNo">519</span><a name="line.519"></a>
+<span class="sourceLineNo">520</span>  /**<a name="line.520"></a>
+<span class="sourceLineNo">521</span>   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}<a name="line.521"></a>
+<span class="sourceLineNo">522</span>   * to write temporary test data. Call this method after setting up the mini dfs cluster<a name="line.522"></a>
+<span class="sourceLineNo">523</span>   * if the test relies on it.<a name="line.523"></a>
+<span class="sourceLineNo">524</span>   * @return a unique path in the test filesystem<a name="line.524"></a>
+<span class="sourceLineNo">525</span>   * @param subdirName name of the subdir to create under the base test dir<a name="line.525"></a>
+<span class="sourceLineNo">526</span>   */<a name="line.526"></a>
+<span class="sourceLineNo">527</span>  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {<a name="line.527"></a>
+<span class="sourceLineNo">528</span>    return new Path(getDataTestDirOnTestFS(), subdirName);<a name="line.528"></a>
+<span class="sourceLineNo">529</span>  }<a name="line.529"></a>
+<span class="sourceLineNo">530</span><a name="line.530"></a>
+<span class="sourceLineNo">531</span>  /**<a name="line.531"></a>
+<span class="sourceLineNo">532</span>   * Sets up a path in test filesystem to be used by tests.<a name="line.532"></a>
+<span class="sourceLineNo">533</span>   * Creates a new directory if not already setup.<a name="line.533"></a>
+<span class="sourceLineNo">534</span>   */<a name="line.534"></a>
+<span class="sourceLineNo">535</span>  private void setupDataTestDirOnTestFS() throws IOException {<a name="line.535"></a>
+<span class="sourceLineNo">536</span>    if (dataTestDirOnTestFS != null) {<a name="line.536"></a>
+<span class="sourceLineNo">537</span>      LOG.warn("Data test on test fs dir already setup in "<a name="line.537"></a>
+<span class="sourceLineNo">538</span>          + dataTestDirOnTestFS.toString());<a name="line.538"></a>
+<span class="sourceLineNo">539</span>      return;<a name="line.539"></a>
+<span class="sourceLineNo">540</span>    }<a name="line.540"></a>
+<span class="sourceLineNo">541</span>    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();<a name="line.541"></a>
+<span class="sourceLineNo">542</span>  }<a name="line.542"></a>
+<span class="sourceLineNo">543</span><a name="line.543"></a>
+<span class="sourceLineNo">544</span>  /**<a name="line.544"></a>
+<span class="sourceLineNo">545</span>   * Sets up a new path in test filesystem to be used by tests.<a name="line.545"></a>
+<span class="sourceLineNo">546</span>   */<a name="line.546"></a>
+<span class="sourceLineNo">547</span>  private Path getNewDataTestDirOnTestFS() throws IOException {<a name="line.547"></a>
+<span class="sourceLineNo">548</span>    //The file system can be either local, mini dfs, or if the configuration<a name="line.548"></a>
+<span class="sourceLineNo">549</span>    //is supplied externally, it can be an external cluster FS. If it is a local<a name="line.549"></a>
+<span class="sourceLineNo">550</span>    //file system, the tests should use getBaseTestDir, otherwise, we can use<a name="line.550"></a>
+<span class="sourceLineNo">551</span>    //the working directory, and create a unique sub dir there<a name="line.551"></a>
+<span class="sourceLineNo">552</span>    FileSystem fs = getTestFileSystem();<a name="line.552"></a>
+<span class="sourceLineNo">553</span>    Path newDataTestDir;<a name="line.553"></a>
+<span class="sourceLineNo">554</span>    String randomStr = UUID.randomUUID().toString();<a name="line.554"></a>
+<span class="sourceLineNo">555</span>    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {<a name="line.555"></a>
+<span class="sourceLineNo">556</span>      newDataTestDir = new Path(getDataTestDir(), randomStr);<a name="line.556"></a>
+<span class="sourceLineNo">557</span>      File dataTestDir = new File(newDataTestDir.toString());<a name="line.557"></a>
+<span class="sourceLineNo">558</span>      if (deleteOnExit()) dataTestDir.deleteOnExit();<a name="line.558"></a>
+<span class="sourceLineNo">559</span>    } else {<a name="line.559"></a>
+<span class="sourceLineNo">560</span>      Path base = getBaseTestDirOnTestFS();<a name="line.560"></a>
+<span class="sourceLineNo">561</span>      newDataTestDir = new Path(base, randomStr);<a name="line.561"></a>
+<span class="sourceLineNo">562</span>      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);<a name="line.562"></a>
+<span class="sourceLineNo">563</span>    }<a name="line.563"></a>
+<span class="sourceLineNo">564</span>    return newDataTestDir;<a name="line.564"></a>
+<span class="sourceLineNo">565</span>  }<a name="line.565"></a>
+<span class="sourceLineNo">566</span><a name="line.566"></a>
+<span class="sourceLineNo">567</span>  /**<a name="line.567"></a>
+<span class="sourceLineNo">568</span>   * Cleans the test data directory on the test filesystem.<a name="line.568"></a>
+<span class="sourceLineNo">569</span>   * @return True if we removed the test dirs<a name="line.569"></a>
+<span class="sourceLineNo">570</span>   * @throws IOException<a name="line.570"></a>
+<span class="sourceLineNo">571</span>   */<a name="line.571"></a>
+<span class="sourceLineNo">572</span>  public boolean cleanupDataTestDirOnTestFS() throws IOException {<a name="line.572"></a>
+<span class="sourceLineNo">573</span>    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);<a name="line.573"></a>
+<span class="sourceLineNo">574</span>    if (ret)<a name="line.574"></a>
+<span class="sourceLineNo">575</span>      dataTestDirOnTestFS = null;<a name="line.575"></a>
+<span class="sourceLineNo">576</span>    return ret;<a name="line.576"></a>
+<span class="sourceLineNo">577</span>  }<a name="line.577"></a>
+<span class="sourceLineNo">578</span><a name="line.578"></a>
+<span class="sourceLineNo">579</span>  /**<a name="line.579"></a>
+<span class="sourceLineNo">580</span>   * Cleans a subdirectory under the test data directory on the test filesystem.<a name="line.580"></a>
+<span class="sourceLineNo">581</span>   * @return True if we removed child<a name="line.581"></a>
+<span class="sourceLineNo">582</span>   * @throws IOException<a name="line.582"></a>
+<span class="sourceLineNo">583</span>   */<a name="line.583"></a>
+<span class="sourceLineNo">584</span>  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {<a name="line.584"></a>
+<span class="sourceLineNo">585</span>    Path cpath = getDataTestDirOnTestFS(subdirName);<a name="line.585"></a>
+<span class="sourceLineNo">586</span>    return getTestFileSystem().delete(cpath, true);<a name="line.586"></a>
 <span class="sourceLineNo">587</span>  }<a name="line.587"></a>
 <span class="sourceLineNo">588</span><a name="line.588"></a>
 <span class="sourceLineNo">589</span>  /**<a name="line.589"></a>
-<span class="sourceLineNo">590</span>   * Cleans a subdirectory under the test data directory on the test filesystem.<a name="line.590"></a>
-<span class="sourceLineNo">591</span>   * @return True if we removed child<a name="line.591"></a>
-<span class="sourceLineNo">592</span>   * @throws IOException<a name="line.592"></a>
-<span class="sourceLineNo">593</span>   */<a name="line.593"></a>
-<span class="sourceLineNo">594</span>  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {<a name="line.594"></a>
-<span class="sourceLineNo">595</span>    Path cpath = getDataTestDirOnTestFS(subdirName);<a name="line.595"></a>
-<span class="sourceLineNo">596</span>    return getTestFileSystem().delete(cpath, true);<a name="line.596"></a>
-<span class="sourceLineNo">597</span>  }<a name="line.597"></a>
-<span class="sourceLineNo">598</span><a name="line.598"></a>
-<span class="sourceLineNo">599</span>  /**<a name="line.599"></a>
-<span class="sourceLineNo">600</span>   * Start a minidfscluster.<a name="line.600"></a>
-<span class="sourceLineNo">601</span>   * @param servers How many DNs to start.<a name="line.601"></a>
-<span class="sourceLineNo">602</span>   * @throws Exception<a name="line.602"></a>
-<span class="sourceLineNo">603</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.603"></a>
-<span class="sourceLineNo">604</span>   * @return The mini dfs cluster created.<a name="line.604"></a>
-<span class="sourceLineNo">605</span>   */<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {<a name="line.606"></a>
-<span class="sourceLineNo">607</span>    return startMiniDFSCluster(servers, null);<a name="line.607"></a>
-<span class="sourceLineNo">608</span>  }<a name="line.608"></a>
-<span class="sourceLineNo">609</span><a name="line.609"></a>
-<span class="sourceLineNo">610</span>  /**<a name="line.610"></a>
-<span class="sourceLineNo">611</span>   * Start a minidfscluster.<a name="line.611"></a>
-<span class="sourceLineNo">612</span>   * This is useful if you want to run datanode on distinct hosts for things<a name="line.612"></a>
-<span class="sourceLineNo">613</span>   * like HDFS block location verification.<a name="line.613"></a>
-<span class="sourceLineNo">614</span>   * If you start MiniDFSCluster without host names, all instances of the<a name="line.614"></a>
-<span class="sourceLineNo">615</span>   * datanodes will have the same host name.<a name="line.615"></a>
-<span class="sourceLineNo">616</span>   * @param hosts hostnames DNs to run on.<a name="line.616"></a>
-<span class="sourceLineNo">617</span>   * @throws Exception<a name="line.617"></a>
-<span class="sourceLineNo">618</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.618"></a>
-<span class="sourceLineNo">619</span>   * @return The mini dfs cluster created.<a name="line.619"></a>
-<span class="sourceLineNo">620</span>   */<a name="line.620"></a>
-<span class="sourceLineNo">621</span>  public MiniDFSCluster startMiniDFSCluster(final String hosts[])<a name="line.621"></a>
-<span class="sourceLineNo">622</span>  throws Exception {<a name="line.622"></a>
-<span class="sourceLineNo">623</span>    if ( hosts != null &amp;&amp; hosts.length != 0) {<a name="line.623"></a>
-<span class="sourceLineNo">624</span>      return startMiniDFSCluster(hosts.length, hosts);<a name="line.624"></a>
-<span class="sourceLineNo">625</span>    } else {<a name="line.625"></a>
-<span class="sourceLineNo">626</span>      return startMiniDFSCluster(1, null);<a name="line.626"></a>
-<span class="sourceLineNo">627</span>    }<a name="line.627"></a>
-<span class="sourceLineNo">628</span>  }<a name="line.628"></a>
-<span class="sourceLineNo">629</span><a name="line.629"></a>
-<span class="sourceLineNo">630</span>  /**<a name="line.630"></a>
-<span class="sourceLineNo">631</span>   * Start a minidfscluster.<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   * Can only create one.<a name="line.632"></a>
-<span class="sourceLineNo">633</span>   * @param servers How many DNs to start.<a name="line.633"></a>
-<span class="sourceLineNo">634</span>   * @param hosts hostnames DNs to run on.<a name="line.634"></a>
-<span class="sourceLineNo">635</span>   * @throws Exception<a name="line.635"></a>
-<span class="sourceLineNo">636</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.636"></a>
-<span class="sourceLineNo">637</span>   * @return The mini dfs cluster created.<a name="line.637"></a>
-<span class="sourceLineNo">638</span>   */<a name="line.638"></a>
-<span class="sourceLineNo">639</span>  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])<a name="line.639"></a>
-<span class="sourceLineNo">640</span>  throws Exception {<a name="line.640"></a>
-<span class="sourceLineNo">641</span>    return startMiniDFSCluster(servers, null, hosts);<a name="line.641"></a>
-<span class="sourceLineNo">642</span>  }<a name="line.642"></a>
-<span class="sourceLineNo">643</span><a name="line.643"></a>
-<span class="sourceLineNo">644</span>  private void setFs() throws IOException {<a name="line.644"></a>
-<span class="sourceLineNo">645</span>    if(this.dfsCluster == null){<a name="line.645"></a>
-<span class="sourceLineNo">646</span>      LOG.info("Skipping setting fs because dfsCluster is null");<a name="line.646"></a>
-<span class="sourceLineNo">647</span>      return;<a name="line.647"></a>
-<span class="sourceLineNo">648</span>    }<a name="line.648"></a>
-<span class="sourceLineNo">649</span>    FileSystem fs = this.dfsCluster.getFileSystem();<a name="line.649"></a>
-<span class="sourceLineNo">650</span>    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));<a name="line.650"></a>
-<span class="sourceLineNo">651</span><a name="line.651"></a>
-<span class="sourceLineNo">652</span>    // re-enable this check with dfs<a name="line.652"></a>
-<span class="sourceLineNo">653</span>    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);<a name="line.653"></a>
-<span class="sourceLineNo">654</span>  }<a name="line.654"></a>
-<span class="sourceLineNo">655</span><a name="line.655"></a>
-<span class="sourceLineNo">656</span>  public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])<a name="line.656"></a>
-<span class="sourceLineNo">657</span>      throws Exception {<a name="line.657"></a>
-<span class="sourceLineNo">658</span>    createDirsAndSetProperties();<a name="line.658"></a>
-<span class="sourceLineNo">659</span>    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);<a name="line.659"></a>
-<span class="sourceLineNo">660</span><a name="line.660"></a>
-<span class="sourceLineNo">661</span>    // Error level to skip some warnings specific to the minicluster. See HBASE-4709<a name="line.661"></a>
-<span class="sourceLineNo">662</span>    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).<a name="line.662"></a>
-<span class="sourceLineNo">663</span>        setLevel(org.apache.log4j.Level.ERROR);<a name="line.663"></a>
-<span class="sourceLineNo">664</span>    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).<a name="line.664"></a>
-<span class="sourceLineNo">665</span>        setLevel(org.apache.log4j.Level.ERROR);<a name="line.665"></a>
-<span class="sourceLineNo">666</span><a name="line.666"></a>
-<span class="sourceLineNo">667</span>    TraceUtil.initTracer(conf);<a name="line.667"></a>
-<span class="sourceLineNo">668</span><a name="line.668"></a>
-<span class="sourceLineNo">669</span>    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,<a name="line.669"></a>
-<span class="sourceLineNo">670</span>        true, null, racks, hosts, null);<a name="line.670"></a>
-<span class="sourceLineNo">671</span><a name="line.671"></a>
-<span class="sourceLineNo">672</span>    // Set this just-started cluster as our filesystem.<a name="line.672"></a>
-<span class="sourceLineNo">673</span>    setFs();<a name="line.673"></a>
-<span class="sourceLineNo">674</span><a name="line.674"></a>
-<span class="sourceLineNo">675</span>    // Wait for the cluster to be totally up<a name="line.675"></a>
-<span class="sourceLineNo">676</span>    this.dfsCluster.waitClusterUp();<a name="line.676"></a>
-<span class="sourceLineNo">677</span><a name="line.677"></a>
-<span class="sourceLineNo">678</span>    //reset the test directory for test file system<a name="line.678"></a>
-<span class="sourceLineNo">679</span>    dataTestDirOnTestFS = null;<a name="line.679"></a>
-<span class="sourceLineNo">680</span>    String dataTestDir = getDataTestDir().toString();<a name="line.680"></a>
-<span class="sourceLineNo">681</span>    conf.set(HConstants.HBASE_DIR, dataTestDir);<a name="line.681"></a>
-<span class="sourceLineNo">682</span>    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);<a name="line.682"></a>
+<span class="sourceLineNo">590</span>   * Start a minidfscluster.<a name="line.590"></a>
+<span class="sourceLineNo">591</span>   * @param servers How many DNs to start.<a name="line.591"></a>
+<span class="sourceLineNo">592</span>   * @throws Exception<a name="line.592"></a>
+<span class="sourceLineNo">593</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.593"></a>
+<span class="sourceLineNo">594</span>   * @return The mini dfs cluster created.<a name="line.594"></a>
+<span class="sourceLineNo">595</span>   */<a name="line.595"></a>
+<span class="sourceLineNo">596</span>  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {<a name="line.596"></a>
+<span class="sourceLineNo">597</span>    return startMiniDFSCluster(servers, null);<a name="line.597"></a>
+<span class="sourceLineNo">598</span>  }<a name="line.598"></a>
+<span class="sourceLineNo">599</span><a name="line.599"></a>
+<span class="sourceLineNo">600</span>  /**<a name="line.600"></a>
+<span class="sourceLineNo">601</span>   * Start a minidfscluster.<a name="line.601"></a>
+<span class="sourceLineNo">602</span>   * This is useful if you want to run datanode on distinct hosts for things<a name="line.602"></a>
+<span class="sourceLineNo">603</span>   * like HDFS block location verification.<a name="line.603"></a>
+<span class="sourceLineNo">604</span>   * If you start MiniDFSCluster without host names, all instances of the<a name="line.604"></a>
+<span class="sourceLineNo">605</span>   * datanodes will have the same host name.<a name="line.605"></a>
+<span class="sourceLineNo">606</span>   * @param hosts hostnames DNs to run on.<a name="line.606"></a>
+<span class="sourceLineNo">607</span>   * @throws Exception<a name="line.607"></a>
+<span class="sourceLineNo">608</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.608"></a>
+<span class="sourceLineNo">609</span>   * @return The mini dfs cluster created.<a name="line.609"></a>
+<span class="sourceLineNo">610</span>   */<a name="line.610"></a>
+<span class="sourceLineNo">611</span>  public MiniDFSCluster startMiniDFSCluster(final String hosts[])<a name="line.611"></a>
+<span class="sourceLineNo">612</span>  throws Exception {<a name="line.612"></a>
+<span class="sourceLineNo">613</span>    if ( hosts != null &amp;&amp; hosts.length != 0) {<a name="line.613"></a>
+<span class="sourceLineNo">614</span>      return startMiniDFSCluster(hosts.length, hosts);<a name="line.614"></a>
+<span class="sourceLineNo">615</span>    } else {<a name="line.615"></a>
+<span class="sourceLineNo">616</span>      return startMiniDFSCluster(1, null);<a name="line.616"></a>
+<span class="sourceLineNo">617</span>    }<a name="line.617"></a>
+<span class="sourceLineNo">618</span>  }<a name="line.618"></a>
+<span class="sourceLineNo">619</span><a name="line.619"></a>
+<span class="sourceLineNo">620</span>  /**<a name="line.620"></a>
+<span class="sourceLineNo">621</span>   * Start a minidfscluster.<a name="line.621"></a>
+<span class="sourceLineNo">622</span>   * Can only create one.<a name="line.622"></a>
+<span class="sourceLineNo">623</span>   * @param servers How many DNs to start.<a name="line.623"></a>
+<span class="sourceLineNo">624</span>   * @param hosts hostnames DNs to run on.<a name="line.624"></a>
+<span class="sourceLineNo">625</span>   * @throws Exception<a name="line.625"></a>
+<span class="sourceLineNo">626</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.626"></a>
+<span class="sourceLineNo">627</span>   * @return The mini dfs cluster created.<a name="line.627"></a>
+<span class="sourceLineNo">628</span>   */<a name="line.628"></a>
+<span class="sourceLineNo">629</span>  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])<a name="line.629"></a>
+<span class="sourceLineNo">630</span>  throws Exception {<a name="line.630"></a>
+<span class="sourceLineNo">631</span>    return startMiniDFSCluster(servers, null, hosts);<a name="line.631"></a>
+<span class="sourceLineNo">632</span>  }<a name="line.632"></a>
+<span class="sourceLineNo">633</span><a name="line.633"></a>
+<span class="sourceLineNo">634</span>  private void setFs() throws IOException {<a name="line.634"></a>
+<span class="sourceLineNo">635</span>    if(this.dfsCluster == null){<a name="line.635"></a>
+<span class="sourceLineNo">636</span>      LOG.info("Skipping setting fs because dfsCluster is null");<a name="line.636"></a>
+<span class="sourceLineNo">637</span>      return;<a name="line.637"></a>
+<span class="sourceLineNo">638</span>    }<a name="line.638"></a>
+<span class="sourceLineNo">639</span>    FileSystem fs = this.dfsCluster.getFileSystem();<a name="line.639"></a>
+<span class="sourceLineNo">640</span>    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));<a name="line.640"></a>
+<span class="sourceLineNo">641</span><a name="line.641"></a>
+<span class="sourceLineNo">642</span>    // re-enable this check with dfs<a name="line.642"></a>
+<span class="sourceLineNo">643</span>    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);<a name="line.643"></a>
+<span class="sourceLineNo">644</span>  }<a name="line.644"></a>
+<span class="sourceLineNo">645</span><a name="line.645"></a>
+<span class="sourceLineNo">646</span>  public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])<a name="line.646"></a>
+<span class="sourceLineNo">647</span>      throws Exception {<a name="line.647"></a>
+<span class="sourceLineNo">648</span>    createDirsAndSetProperties();<a name="line.648"></a>
+<span class="sourceLineNo">649</span>    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);<a name="line.649"></a>
+<span class="sourceLineNo">650</span><a name="line.650"></a>
+<span class="sourceLineNo">651</span>    // Error level to skip some warnings specific to the minicluster. See HBASE-4709<a name="line.651"></a>
+<span class="sourceLineNo">652</span>    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).<a name="line.652"></a>
+<span class="sourceLineNo">653</span>        setLevel(org.apache.log4j.Level.ERROR);<a name="line.653"></a>
+<span class="sourceLineNo">654</span>    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).<a name="line.654"></a>
+<span class="sourceLineNo">655</span>        setLevel(org.apache.log4j.Level.ERROR);<a name="line.655"></a>
+<span class="sourceLineNo">656</span><a name="line.656"></a>
+<span class="sourceLineNo">657</span>    TraceUtil.initTracer(conf);<a name="line.657"></a>
+<span class="sourceLineNo">658</span><a name="line.658"></a>
+<span class="sourceLineNo">659</span>    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,<a name="line.659"></a>
+<span class="sourceLineNo">660</span>        true, null, racks, hosts, null);<a name="line.660"></a>
+<span class="sourceLineNo">661</span><a name="line.661"></a>
+<span class="sourceLineNo">662</span>    // Set this just-started cluster as our filesystem.<a name="line.662"></a>
+<span class="sourceLineNo">663</span>    setFs();<a name="line.663"></a>
+<span class="sourceLineNo">664</span><a name="line.664"></a>
+<span class="sourceLineNo">665</span>    // Wait for the cluster to be totally up<a name="line.665"></a>
+<span class="sourceLineNo">666</span>    this.dfsCluster.waitClusterUp();<a name="line.666"></a>
+<span class="sourceLineNo">667</span><a name="line.667"></a>
+<span class="sourceLineNo">668</span>    //reset the test directory for test file system<a name="line.668"></a>
+<span class="sourceLineNo">669</span>    dataTestDirOnTestFS = null;<a name="line.669"></a>
+<span class="sourceLineNo">670</span>    String dataTestDir = getDataTestDir().toString();<a name="line.670"></a>
+<span class="sourceLineNo">671</span>    conf.set(HConstants.HBASE_DIR, dataTestDir);<a name="line.671"></a>
+<span class="sourceLineNo">672</span>    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);<a name="line.672"></a>
+<span class="sourceLineNo">673</span><a name="line.673"></a>
+<span class="sourceLineNo">674</span>    return this.dfsCluster;<a name="line.674"></a>
+<span class="sourceLineNo">675</span>  }<a name="line.675"></a>
+<span class="sourceLineNo">676</span><a name="line.676"></a>
+<span class="sourceLineNo">677</span>  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {<a name="line.677"></a>
+<span class="sourceLineNo">678</span>    createDirsAndSetProperties();<a name="line.678"></a>
+<span class="sourceLineNo">679</span>    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,<a name="line.679"></a>
+<span class="sourceLineNo">680</span>        null, null, null);<a name="line.680"></a>
+<span class="sourceLineNo">681</span>    return dfsCluster;<a name="line.681"></a>
+<span class="sourceLineNo">682</span>  }<a name="line.682"></a>
 <span class="sourceLineNo">683</span><a name="line.683"></a>
-<span class="sourceLineNo">684</span>    return this.dfsCluster;<a name="line.684"></a>
-<span class="sourceLineNo">685</span>  }<a name="line.685"></a>
-<span class="sourceLineNo">686</span><a name="line.686"></a>
-<span class="sourceLineNo">687</span>  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {<a name="line.687"></a>
-<span class="sourceLineNo">688</span>    createDirsAndSetProperties();<a name="line.688"></a>
-<span class="sourceLineNo">689</span>    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,<a name="line.689"></a>
-<span class="sourceLineNo">690</span>        null, null, null);<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    return dfsCluster;<a name="line.691"></a>
-<span class="sourceLineNo">692</span>  }<a name="line.692"></a>
-<span class="sourceLineNo">693</span><a name="line.693"></a>
-<span class="sourceLineNo">694</span>  /** This is used before starting HDFS and map-reduce mini-clusters */<a name="line.694"></a>
-<span class="sourceLineNo">695</span>  private void createDirsAndSetProperties() throws IOException {<a name="line.695"></a>
-<span class="sourceLineNo">696</span>    setupClusterTestDir();<a name="line.696"></a>
-<span class="sourceLineNo">697</span>    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());<a name="line.697"></a>
-<span class="sourceLineNo">698</span>    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());<a name="line.698"></a>
-<span class="sourceLineNo">699</span>    createDirAndSetProperty("cache_data", "test.cache.data");<a name="line.699"></a>
-<span class="sourceLineNo">700</span>    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");<a name="line.700"></a>
-<span class="sourceLineNo">701</span>    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");<a name="line.701"></a>
-<span class="sourceLineNo">702</span>    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");<a name="line.702"></a>
-<span class="sourceLineNo">703</span>    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");<a name="line.703"></a>
-<span class="sourceLineNo">704</span>    enableShortCircuit();<a name="line.704"></a>
-<span class="sourceLineNo">705</span><a name="line.705"></a>
-<span class="sourceLineNo">706</span>    Path root = getDataTestDirOnTestFS("hadoop");<a name="line.706"></a>
-<span class="sourceLineNo">707</span>    conf.set(MapreduceTestingShim.getMROutputDirProp(),<a name="line.707"></a>
-<span class="sourceLineNo">708</span>      new Path(root, "mapred-output-dir").toString());<a name="line.708"></a>
-<span class="sourceLineNo">709</span>    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());<a name="line.709"></a>
-<span class="sourceLineNo">710</span>    conf.set("mapreduce.jobtracker.staging.root.dir",<a name="line.710"></a>
-<span class="sourceLineNo">711</span>      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());<a name="line.711"></a>
-<span class="sourceLineNo">712</span>    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());<a name="line.712"></a>
-<span class="sourceLineNo">713</span>    conf.set("yarn.app.mapreduce.am.staging-dir",<a name="line.713"></a>
-<span class="sourceLineNo">714</span>      new Path(root, "mapreduce-am-staging-root-dir").toString());<a name="line.714"></a>
-<span class="sourceLineNo">715</span>  }<a name="line.715"></a>
-<span class="sourceLineNo">716</span><a name="line.716"></a>
-<span class="sourceLineNo">717</span><a name="line.717"></a>
-<span class="sourceLineNo">718</span>  /**<a name="line.718"></a>
-<span class="sourceLineNo">719</span>   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.<a name="line.719"></a>
-<span class="sourceLineNo">720</span>   *  This allows to specify this parameter on the command line.<a name="line.720"></a>
-<span class="sourceLineNo">721</span>   *   If not set, default is true.<a name="line.721"></a>
-<span class="sourceLineNo">722</span>   */<a name="line.722"></a>
-<span class="sourceLineNo">723</span>  public boolean isReadShortCircuitOn(){<a name="line.723"></a>
-<span class="sourceLineNo">724</span>    final String propName = "hbase.tests.use.shortcircuit.reads";<a name="line.724"></a>
-<span class="sourceLineNo">725</span>    String readOnProp = System.getProperty(propName);<a name="line.725"></a>
-<span class="sourceLineNo">726</span>    if (readOnProp != null){<a name="line.726"></a>
-<span class="sourceLineNo">727</span>      return  Boolean.parseBoolean(readOnProp);<a name="line.727"></a>
-<span class="sourceLineNo">728</span>    } else {<a name="line.728"></a>
-<span class="sourceLineNo">729</span>      return conf.getBoolean(propName, false);<a name="line.729"></a>
-<span class="sourceLineNo">730</span>    }<a name="line.730"></a>
-<span class="sourceLineNo">731</span>  }<a name="line.731"></a>
-<span class="sourceLineNo">732</span><a name="line.732"></a>
-<span class="sourceLineNo">733</span>  /** Enable the short circuit read, unless configured differently.<a name="line.733"></a>
-<span class="sourceLineNo">734</span>   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.<a name="line.734"></a>
-<span class="sourceLineNo">735</span>   */<a name="line.735"></a>
-<span class="sourceLineNo">736</span>  private void enableShortCircuit() {<a name="line.736"></a>
-<span class="sourceLineNo">737</span>    if (isReadShortCircuitOn()) {<a name="line.737"></a>
-<span class="sourceLineNo">738</span>      String curUser = System.getProperty("user.name");<a name="line.738"></a>
-<span class="sourceLineNo">739</span>      LOG.info("read short circuit is ON for user " + curUser);<a name="line.739"></a>
-<span class="sourceLineNo">740</span>      // read short circuit, for hdfs<a name="line.740"></a>
-<span class="sourceLineNo">741</span>      conf.set("dfs.block.local-path-access.user", curUser);<a name="line.741"></a>
-<span class="sourceLineNo">742</span>      // read short circuit, for hbase<a name="line.742"></a>
-<span class="sourceLineNo">743</span>      conf.setBoolean("dfs.client.read.shortcircuit", true);<a name="line.743"></a>
-<span class="sourceLineNo">744</span>      // Skip checking checksum, for the hdfs client and the datanode<a name="line.744"></a>
-<span class="sourceLineNo">745</span>      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);<a name="line.745"></a>
-<span class="sourceLineNo">746</span>    } else {<a name="line.746"></a>
-<span class="sourceLineNo">747</span>      LOG.info("read short circuit is OFF");<a name="line.747"></a>
-<span class="sourceLineNo">748</span>    }<a name="line.748"></a>
-<span class="sourceLineNo">749</span>  }<a name="line.749"></a>
-<span class="sourceLineNo">750</span><a name="line.750"></a>
-<span class="sourceLineNo">751</span>  private String createDirAndSetProperty(final String relPath, String property) {<a name="line.751"></a>
-<span class="sourceLineNo">752</span>    String path = getDataTestDir(relPath).toString();<a name="line.752"></a>
-<span class="sourceLineNo">753</span>    System.setProperty(property, path);<a name="line.753"></a>
-<span class="sourceLineNo">754</span>    conf.set(property, path);<a name="line.754"></a>
-<span class="sourceLineNo">755</span>    new File(path).mkdirs();<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");<a name="line.756"></a>
-<span class="sourceLineNo">757</span>    return path;<a name="line.757"></a>
-<span class="sourceLineNo">758</span>  }<a name="line.758"></a>
-<span class="sourceLineNo">759</span><a name="line.759"></a>
-<span class="sourceLineNo">760</span>  /**<a name="line.760"></a>
-<span class="sourceLineNo">761</span>   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}<a name="line.761"></a>
-<span class="sourceLineNo">762</span>   * or does nothing.<a name="line.762"></a>
-<span class="sourceLineNo">763</span>   * @throws IOException<a name="line.763"></a>
-<span class="sourceLineNo">764</span>   */<a name="line.764"></a>
-<span class="sourceLineNo">765</span>  public void shutdownMiniDFSCluster() throws IOException {<a name="line.765"></a>
-<span class="sourceLineNo">766</span>    if (this.dfsCluster != null) {<a name="line.766"></a>
-<span class="sourceLineNo">767</span>      // The below throws an exception per dn, AsynchronousCloseException.<a name="line.767"></a>
-<span class="sourceLineNo">768</span>      this.dfsCluster.shutdown();<a name="line.768"></a>
-<span class="sourceLineNo">769</span>      dfsCluster = null;<a name="line.769"></a>
-<span class="sourceLineNo">770</span>      dataTestDirOnTestFS = null;<a name="line.770"></a>
-<span class="sourceLineNo">771</span>      FSUtils.setFsDefault(this.conf, new Path("file:///"));<a name="line.771"></a>
-<span class="sourceLineNo">772</span>    }<a name="line.772"></a>
-<span class="sourceLineNo">773</span>  }<a name="line.773"></a>
-<span class="sourceLineNo">774</span><a name="line.774"></a>
+<span class="sourceLineNo">684</span>  /** This is used before starting HDFS and map-reduce mini-clusters */<a name="line.684"></a>
+<span class="sourceLineNo">685</span>  private void createDirsAndSetProperties() throws IOException {<a name="line.685"></a>
+<span class="sourceLineNo">686</span>    setupClusterTestDir();<a name="line.686"></a>
+<span class="sourceLineNo">687</span>    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());<a name="line.687"></a>
+<span class="sourceLineNo">688</span>    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());<a name="line.688"></a>
+<span class="sourceLineNo">689</span>    createDirAndSetProperty("cache_data", "test.cache.data");<a name="line.689"></a>
+<span class="sourceLineNo">690</span>    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");<a name="line.690"></a>
+<span class="sourceLineNo">691</span>    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");<a name="line.691"></a>
+<span class="sourceLineNo">692</span>    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");<a name="line.692"></a>
+<span class="sourceLineNo">693</span>    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");<a name="line.693"></a>
+<span class="sourceLineNo">694</span>    enableShortCircuit();<a name="line.694"></a>
+<span class="sourceLineNo">695</span><a name="line.695"></a>
+<span class="sourceLineNo">696</span>    Path root = getDataTestDirOnTestFS("hadoop");<a name="line.696"></a>
+<span class="sourceLineNo">697</span>    conf.set(MapreduceTestingShim.getMROutputDirProp(),<a name="line.697"></a>
+<span class="sourceLineNo">698</span>      new Path(root, "mapred-output-dir").toString());<a name="line.698"></a>
+<span class="sourceLineNo">699</span>    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());<a name="line.699"></a>
+<span class="sourceLineNo">700</span>    conf.set("mapreduce.jobtracker.staging.root.dir",<a name="line.700"></a>
+<span class="sourceLineNo">701</span>      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());<a name="line.701"></a>
+<span class="sourceLineNo">702</span>    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());<a name="line.702"></a>
+<span class="sourceLineNo">703</span>    conf.set("yarn.app.mapreduce.am.staging-dir",<a name="line.703"></a>
+<span class="sourceLineNo">704</span>      new Path(root, "mapreduce-am-staging-root-dir").toString());<a name="line.704"></a>
+<span class="sourceLineNo">705</span>  }<a name="line.705"></a>
+<span class="sourceLineNo">706</span><a name="line.706"></a>
+<span class="sourceLineNo">707</span><a name="line.707"></a>
+<span class="sourceLineNo">708</span>  /**<a name="line.708"></a>
+<span class="sourceLineNo">709</span>   *  Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.<a name="line.709"></a>
+<span class="sourceLineNo">710</span>   *  This allows to specify this parameter on the command line.<a name="line.710"></a>
+<span class="sourceLineNo">711</span>   *   If not set, default is true.<a name="line.711"></a>
+<span class="sourceLineNo">712</span>   */<a name="line.712"></a>
+<span class="sourceLineNo">713</span>  public boolean isReadShortCircuitOn(){<a name="line.713"></a>
+<span class="sourceLineNo">714</span>    final String propName = "hbase.tests.use.shortcircuit.reads";<a name="line.714"></a>
+<span class="sourceLineNo">715</span>    String readOnProp = System.getProperty(propName);<a name="line.715"></a>
+<span class="sourceLineNo">716</span>    if (readOnProp != null){<a name="line.716"></a>
+<span class="sourceLineNo">717</span>      return  Boolean.parseBoolean(readOnProp);<a name="line.717"></a>
+<span class="sourceLineNo">718</span>    } else {<a name="line.718"></a>
+<span class="sourceLineNo">719</span>      return conf.getBoolean(propName, false);<a name="line.719"></a>
+<span class="sourceLineNo">720</span>    }<a name="line.720"></a>
+<span class="sourceLineNo">721</span>  }<a name="line.721"></a>
+<span class="sourceLineNo">722</span><a name="line.722"></a>
+<span class="sourceLineNo">723</span>  /** Enable the short circuit read, unless configured differently.<a name="line.723"></a>
+<span class="sourceLineNo">724</span>   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.<a name="line.724"></a>
+<span class="sourceLineNo">725</span>   */<a name="line.725"></a>
+<span class="sourceLineNo">726</span>  private void enableShortCircuit() {<a name="line.726"></a>
+<span class="sourceLineNo">727</span>    if (isReadShortCircuitOn()) {<a name="line.727"></a>
+<span class="sourceLineNo">728</span>      String curUser = System.getProperty("user.name");<a name="line.728"></a>
+<span class="sourceLineNo">729</span>      LOG.info("read short circuit is ON for user " + curUser);<a name="line.729"></a>
+<span class="sourceLineNo">730</span>      // read short circuit, for hdfs<a name="line.730"></a>
+<span class="sourceLineNo">731</span>      conf.set("dfs.block.local-path-access.user", curUser);<a name="line.731"></a>
+<span class="sourceLineNo">732</span>      // read short circuit, for hbase<a name="line.732"></a>
+<span class="sourceLineNo">733</span>      conf.setBoolean("dfs.client.read.shortcircuit", true);<a name="line.733"></a>
+<span class="sourceLineNo">734</span>      // Skip checking checksum, for the hdfs client and the datanode<a name="line.734"></a>
+<span class="sourceLineNo">735</span>      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);<a name="line.735"></a>
+<span class="sourceLineNo">736</span>    } else {<a name="line.736"></a>
+<span class="sourceLineNo">737</span>      LOG.info("read short circuit is OFF");<a name="line.737"></a>
+<span class="sourceLineNo">738</span>    }<a name="line.738"></a>
+<span class="sourceLineNo">739</span>  }<a name="line.739"></a>
+<span class="sourceLineNo">740</span><a name="line.740"></a>
+<span class="sourceLineNo">741</span>  private String createDirAndSetProperty(final String relPath, String property) {<a name="line.741"></a>
+<span class="sourceLineNo">742</span>    String path = getDataTestDir(relPath).toString();<a name="line.742"></a>
+<span class="sourceLineNo">743</span>    System.setProperty(property, path);<a name="line.743"></a>
+<span class="sourceLineNo">744</span>    conf.set(property, path);<a name="line.744"></a>
+<span class="sourceLineNo">745</span>    new File(path).mkdirs();<a name="line.745"></a>
+<span class="sourceLineNo">746</span>    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");<a name="line.746"></a>
+<span class="sourceLineNo">747</span>    return path;<a name="line.747"></a>
+<span class="sourceLineNo">748</span>  }<a name="line.748"></a>
+<span class="sourceLineNo">749</span><a name="line.749"></a>
+<span class="sourceLineNo">750</span>  /**<a name="line.750"></a>
+<span class="sourceLineNo">751</span>   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}<a name="line.751"></a>
+<span class="sourceLineNo">752</span>   * or does nothing.<a name="line.752"></a>
+<span class="sourceLineNo">753</span>   * @throws IOException<a name="line.753"></a>
+<span class="sourceLineNo">754</span>   */<a name="line.754"></a>
+<span class="sourceLineNo">755</span>  public void shutdownMiniDFSCluster() throws IOException {<a name="line.755"></a>
+<span class="sourceLineNo">756</span>    if (this.dfsCluster != null) {<a name="line.756"></a>
+<span class="sourceLineNo">757</span>      // The below throws an exception per dn, AsynchronousCloseException.<a name="line.757"></a>
+<span class="sourceLineNo">758</span>      this.dfsCluster.shutdown();<a name="line.758"></a>
+<span class="sourceLineNo">759</span>      dfsCluster = null;<a name="line.759"></a>
+<span class="sourceLineNo">760</span>      dataTestDirOnTestFS = null;<a name="line.760"></a>
+<span class="sourceLineNo">761</span>      FSUtils.setFsDefault(this.conf, new Path("file:///"));<a name="line.761"></a>
+<span class="sourceLineNo">762</span>    }<a name="line.762"></a>
+<span class="sourceLineNo">763</span>  }<a name="line.763"></a>
+<span class="sourceLineNo">764</span><a name="line.764"></a>
+<span class="sourceLineNo">765</span><a name="line.765"></a>
+<span class="sourceLineNo">766</span>  /**<a name="line.766"></a>
+<span class="sourceLineNo">767</span>   * Start up a minicluster of hbase, dfs, and zookeeper.<a name="line.767"></a>
+<span class="sourceLineNo">768</span>   * @throws Exception<a name="line.768"></a>
+<span class="sourceLineNo">769</span>   * @return Mini hbase cluster instance created.<a name="line.769"></a>
+<span class="sourceLineNo">770</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.770"></a>
+<span class="sourceLineNo">771</span>   */<a name="line.771"></a>
+<span class="sourceLineNo">772</span>  public MiniHBaseCluster startMiniCluster() throws Exception {<a name="line.772"></a>
+<span class="sourceLineNo">773</span>    return startMiniCluster(1, 1);<a name="line.773"></a>
+<span class="sourceLineNo">774</span>  }<a name="line.774"></a>
 <span class="sourceLineNo">775</span><a name="line.775"></a>
 <span class="sourceLineNo">776</span>  /**<a name="line.776"></a>
-<span class="sourceLineNo">777</span>   * Start up a minicluster of hbase, dfs, and zookeeper.<a name="line.777"></a>
+<span class="sourceLineNo">777</span>   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.<a name="line.777"></a>
 <span class="sourceLineNo">778</span>   * @throws Exception<a name="line.778"></a>
 <span class="sourceLineNo">779</span>   * @return Mini hbase cluster instance created.<a name="line.779"></a>
 <span class="sourceLineNo">780</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.780"></a>
 <span class="sourceLineNo">781</span>   */<a name="line.781"></a>
-<span class="sourceLineNo">782</span>  public MiniHBaseCluster startMiniCluster() throws Exception {<a name="line.782"></a>
-<span class="sourceLineNo">783</span>    return startMiniCluster(1, 1);<a name="line.783"></a>
+<span class="sourceLineNo">782</span>  public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {<a name="line.782"></a>
+<span class="sourceLineNo">783</span>    return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);<a name="line.783"></a>
 <span class="sourceLineNo">784</span>  }<a name="line.784"></a>
 <span class="sourceLineNo">785</span><a name="line.785"></a>
 <span class="sourceLineNo">786</span>  /**<a name="line.786"></a>
-<span class="sourceLineNo">787</span>   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.<a name="line.787"></a>
-<span class="sourceLineNo">788</span>   * @throws Exception<a name="line.788"></a>
-<span class="sourceLineNo">789</span>   * @return Mini hbase cluster instance created.<a name="line.789"></a>
-<span class="sourceLineNo">790</span>   * @see {@link #shutdownMiniDFSCluster()}<a name="line.790"></a>
-<span class="sourceLineNo">791</span>   */<a name="line.791"></a>
-<span class="sourceLineNo">792</span>  public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {<a name="line.792"></a>
-<span class="sourceLineNo">793</span>    return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);<a name="line.793"></a>
-<span class="sourceLineNo">794</span>  }<a name="line.794"></a>
-<span class="sourceLineNo">795</span><a name="line.795"></a>
-<span class="sourceLineNo">796</span>  /**<a name="line.796"></a>
-<span class="sourceLineNo">797</span>   * Start up a minicluster of hbase, dfs, and zookeeper.<a name="line.797"></a>
-<span class="sourceLineNo">798</span>   * Set the &lt;code&gt;create&lt;/code&gt; flag to create root or data directory path or not<a name="line.798"></a>
-<span class="sourceLineNo">799</span>   * (will overwrite if dir already exists)<a name="line.799"></a>
-<span class="sourceLineNo">800</span>   * @throws Exception<a name="line.800"></a>
-<span class="sourceLineNo">801</span>   * @return Mini hbase clus

<TRUNCATED>