Posted to commits@hbase.apache.org by gi...@apache.org on 2018/07/04 14:51:13 UTC

[09/31] hbase-site git commit: Published site at 3e7f724837e35f41153bc3059e8ac507583a9200.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/624dc6ea/devapidocs/src-html/org/apache/hadoop/hbase/util/CommonFSUtils.StreamCapabilities.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/CommonFSUtils.StreamCapabilities.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/CommonFSUtils.StreamCapabilities.html
index 65be929..6b83bf9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/CommonFSUtils.StreamCapabilities.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/CommonFSUtils.StreamCapabilities.html
@@ -462,87 +462,87 @@
 <span class="sourceLineNo">454</span>        new Path(namespace)));<a name="line.454"></a>
 <span class="sourceLineNo">455</span>  }<a name="line.455"></a>
 <span class="sourceLineNo">456</span><a name="line.456"></a>
-<span class="sourceLineNo">457</span>  /**<a name="line.457"></a>
-<span class="sourceLineNo">458</span>   * Sets storage policy for given path according to config setting.<a name="line.458"></a>
-<span class="sourceLineNo">459</span>   * If the passed path is a directory, we'll set the storage policy for all files<a name="line.459"></a>
-<span class="sourceLineNo">460</span>   * created in the future in said directory. Note that this change in storage<a name="line.460"></a>
-<span class="sourceLineNo">461</span>   * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.<a name="line.461"></a>
-<span class="sourceLineNo">462</span>   * If we're running on a FileSystem implementation that doesn't support the given storage policy<a name="line.462"></a>
-<span class="sourceLineNo">463</span>   * (or storage policies at all), then we'll issue a log message and continue.<a name="line.463"></a>
-<span class="sourceLineNo">464</span>   *<a name="line.464"></a>
-<span class="sourceLineNo">465</span>   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html<a name="line.465"></a>
-<span class="sourceLineNo">466</span>   *<a name="line.466"></a>
-<span class="sourceLineNo">467</span>   * @param fs We only do anything it implements a setStoragePolicy method<a name="line.467"></a>
-<span class="sourceLineNo">468</span>   * @param conf used to look up storage policy with given key; not modified.<a name="line.468"></a>
-<span class="sourceLineNo">469</span>   * @param path the Path whose storage policy is to be set<a name="line.469"></a>
-<span class="sourceLineNo">470</span>   * @param policyKey Key to use pulling a policy from Configuration:<a name="line.470"></a>
-<span class="sourceLineNo">471</span>   *   e.g. HConstants.WAL_STORAGE_POLICY (hbase.wal.storage.policy).<a name="line.471"></a>
-<span class="sourceLineNo">472</span>   * @param defaultPolicy if the configured policy is equal to this policy name, we will skip<a name="line.472"></a>
-<span class="sourceLineNo">473</span>   *   telling the FileSystem to set a storage policy.<a name="line.473"></a>
-<span class="sourceLineNo">474</span>   */<a name="line.474"></a>
-<span class="sourceLineNo">475</span>  public static void setStoragePolicy(final FileSystem fs, final Configuration conf,<a name="line.475"></a>
-<span class="sourceLineNo">476</span>      final Path path, final String policyKey, final String defaultPolicy) {<a name="line.476"></a>
-<span class="sourceLineNo">477</span>    String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT);<a name="line.477"></a>
-<span class="sourceLineNo">478</span>    if (storagePolicy.equals(defaultPolicy)) {<a name="line.478"></a>
-<span class="sourceLineNo">479</span>      if (LOG.isTraceEnabled()) {<a name="line.479"></a>
-<span class="sourceLineNo">480</span>        LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");<a name="line.480"></a>
-<span class="sourceLineNo">481</span>      }<a name="line.481"></a>
-<span class="sourceLineNo">482</span>      return;<a name="line.482"></a>
-<span class="sourceLineNo">483</span>    }<a name="line.483"></a>
-<span class="sourceLineNo">484</span>    setStoragePolicy(fs, path, storagePolicy);<a name="line.484"></a>
-<span class="sourceLineNo">485</span>  }<a name="line.485"></a>
-<span class="sourceLineNo">486</span><a name="line.486"></a>
-<span class="sourceLineNo">487</span>  // this mapping means that under a federated FileSystem implementation, we'll<a name="line.487"></a>
-<span class="sourceLineNo">488</span>  // only log the first failure from any of the underlying FileSystems at WARN and all others<a name="line.488"></a>
-<span class="sourceLineNo">489</span>  // will be at DEBUG.<a name="line.489"></a>
-<span class="sourceLineNo">490</span>  private static final Map&lt;FileSystem, Boolean&gt; warningMap =<a name="line.490"></a>
-<span class="sourceLineNo">491</span>      new ConcurrentHashMap&lt;FileSystem, Boolean&gt;();<a name="line.491"></a>
-<span class="sourceLineNo">492</span><a name="line.492"></a>
-<span class="sourceLineNo">493</span>  /**<a name="line.493"></a>
-<span class="sourceLineNo">494</span>   * Sets storage policy for given path.<a name="line.494"></a>
-<span class="sourceLineNo">495</span>   * If the passed path is a directory, we'll set the storage policy for all files<a name="line.495"></a>
-<span class="sourceLineNo">496</span>   * created in the future in said directory. Note that this change in storage<a name="line.496"></a>
-<span class="sourceLineNo">497</span>   * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.<a name="line.497"></a>
-<span class="sourceLineNo">498</span>   * If we're running on a version of FileSystem that doesn't support the given storage policy<a name="line.498"></a>
-<span class="sourceLineNo">499</span>   * (or storage policies at all), then we'll issue a log message and continue.<a name="line.499"></a>
-<span class="sourceLineNo">500</span>   *<a name="line.500"></a>
-<span class="sourceLineNo">501</span>   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html<a name="line.501"></a>
-<span class="sourceLineNo">502</span>   *<a name="line.502"></a>
-<span class="sourceLineNo">503</span>   * @param fs We only do anything it implements a setStoragePolicy method<a name="line.503"></a>
-<span class="sourceLineNo">504</span>   * @param path the Path whose storage policy is to be set<a name="line.504"></a>
-<span class="sourceLineNo">505</span>   * @param storagePolicy Policy to set on &lt;code&gt;path&lt;/code&gt;; see hadoop 2.6+<a name="line.505"></a>
-<span class="sourceLineNo">506</span>   *   org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g<a name="line.506"></a>
-<span class="sourceLineNo">507</span>   *   'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.<a name="line.507"></a>
-<span class="sourceLineNo">508</span>   */<a name="line.508"></a>
-<span class="sourceLineNo">509</span>  public static void setStoragePolicy(final FileSystem fs, final Path path,<a name="line.509"></a>
-<span class="sourceLineNo">510</span>      final String storagePolicy) {<a name="line.510"></a>
-<span class="sourceLineNo">511</span>    if (storagePolicy == null) {<a name="line.511"></a>
-<span class="sourceLineNo">512</span>      if (LOG.isTraceEnabled()) {<a name="line.512"></a>
-<span class="sourceLineNo">513</span>        LOG.trace("We were passed a null storagePolicy, exiting early.");<a name="line.513"></a>
-<span class="sourceLineNo">514</span>      }<a name="line.514"></a>
-<span class="sourceLineNo">515</span>      return;<a name="line.515"></a>
-<span class="sourceLineNo">516</span>    }<a name="line.516"></a>
-<span class="sourceLineNo">517</span>    final String trimmedStoragePolicy = storagePolicy.trim();<a name="line.517"></a>
-<span class="sourceLineNo">518</span>    if (trimmedStoragePolicy.isEmpty()) {<a name="line.518"></a>
-<span class="sourceLineNo">519</span>      if (LOG.isTraceEnabled()) {<a name="line.519"></a>
-<span class="sourceLineNo">520</span>        LOG.trace("We were passed an empty storagePolicy, exiting early.");<a name="line.520"></a>
+<span class="sourceLineNo">457</span>  // this mapping means that under a federated FileSystem implementation, we'll<a name="line.457"></a>
+<span class="sourceLineNo">458</span>  // only log the first failure from any of the underlying FileSystems at WARN and all others<a name="line.458"></a>
+<span class="sourceLineNo">459</span>  // will be at DEBUG.<a name="line.459"></a>
+<span class="sourceLineNo">460</span>  private static final Map&lt;FileSystem, Boolean&gt; warningMap =<a name="line.460"></a>
+<span class="sourceLineNo">461</span>      new ConcurrentHashMap&lt;FileSystem, Boolean&gt;();<a name="line.461"></a>
+<span class="sourceLineNo">462</span><a name="line.462"></a>
+<span class="sourceLineNo">463</span>  /**<a name="line.463"></a>
+<span class="sourceLineNo">464</span>   * Sets storage policy for given path.<a name="line.464"></a>
+<span class="sourceLineNo">465</span>   * If the passed path is a directory, we'll set the storage policy for all files<a name="line.465"></a>
+<span class="sourceLineNo">466</span>   * created in the future in said directory. Note that this change in storage<a name="line.466"></a>
+<span class="sourceLineNo">467</span>   * policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle.<a name="line.467"></a>
+<span class="sourceLineNo">468</span>   * If we're running on a version of FileSystem that doesn't support the given storage policy<a name="line.468"></a>
+<span class="sourceLineNo">469</span>   * (or storage policies at all), then we'll issue a log message and continue.<a name="line.469"></a>
+<span class="sourceLineNo">470</span>   *<a name="line.470"></a>
+<span class="sourceLineNo">471</span>   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html<a name="line.471"></a>
+<span class="sourceLineNo">472</span>   *<a name="line.472"></a>
+<span class="sourceLineNo">473</span>   * @param fs We only do anything it implements a setStoragePolicy method<a name="line.473"></a>
+<span class="sourceLineNo">474</span>   * @param path the Path whose storage policy is to be set<a name="line.474"></a>
+<span class="sourceLineNo">475</span>   * @param storagePolicy Policy to set on &lt;code&gt;path&lt;/code&gt;; see hadoop 2.6+<a name="line.475"></a>
+<span class="sourceLineNo">476</span>   *   org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g<a name="line.476"></a>
+<span class="sourceLineNo">477</span>   *   'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.<a name="line.477"></a>
+<span class="sourceLineNo">478</span>   */<a name="line.478"></a>
+<span class="sourceLineNo">479</span>  public static void setStoragePolicy(final FileSystem fs, final Path path,<a name="line.479"></a>
+<span class="sourceLineNo">480</span>      final String storagePolicy) {<a name="line.480"></a>
+<span class="sourceLineNo">481</span>    try {<a name="line.481"></a>
+<span class="sourceLineNo">482</span>      setStoragePolicy(fs, path, storagePolicy, false);<a name="line.482"></a>
+<span class="sourceLineNo">483</span>    } catch (IOException e) {<a name="line.483"></a>
+<span class="sourceLineNo">484</span>      // should never arrive here<a name="line.484"></a>
+<span class="sourceLineNo">485</span>      LOG.warn("We have chosen not to throw exception but some unexpectedly thrown out", e);<a name="line.485"></a>
+<span class="sourceLineNo">486</span>    }<a name="line.486"></a>
+<span class="sourceLineNo">487</span>  }<a name="line.487"></a>
+<span class="sourceLineNo">488</span><a name="line.488"></a>
+<span class="sourceLineNo">489</span>  static void setStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy,<a name="line.489"></a>
+<span class="sourceLineNo">490</span>      boolean throwException) throws IOException {<a name="line.490"></a>
+<span class="sourceLineNo">491</span>    if (storagePolicy == null) {<a name="line.491"></a>
+<span class="sourceLineNo">492</span>      if (LOG.isTraceEnabled()) {<a name="line.492"></a>
+<span class="sourceLineNo">493</span>        LOG.trace("We were passed a null storagePolicy, exiting early.");<a name="line.493"></a>
+<span class="sourceLineNo">494</span>      }<a name="line.494"></a>
+<span class="sourceLineNo">495</span>      return;<a name="line.495"></a>
+<span class="sourceLineNo">496</span>    }<a name="line.496"></a>
+<span class="sourceLineNo">497</span>    String trimmedStoragePolicy = storagePolicy.trim();<a name="line.497"></a>
+<span class="sourceLineNo">498</span>    if (trimmedStoragePolicy.isEmpty()) {<a name="line.498"></a>
+<span class="sourceLineNo">499</span>      if (LOG.isTraceEnabled()) {<a name="line.499"></a>
+<span class="sourceLineNo">500</span>        LOG.trace("We were passed an empty storagePolicy, exiting early.");<a name="line.500"></a>
+<span class="sourceLineNo">501</span>      }<a name="line.501"></a>
+<span class="sourceLineNo">502</span>      return;<a name="line.502"></a>
+<span class="sourceLineNo">503</span>    } else {<a name="line.503"></a>
+<span class="sourceLineNo">504</span>      trimmedStoragePolicy = trimmedStoragePolicy.toUpperCase(Locale.ROOT);<a name="line.504"></a>
+<span class="sourceLineNo">505</span>    }<a name="line.505"></a>
+<span class="sourceLineNo">506</span>    if (trimmedStoragePolicy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)) {<a name="line.506"></a>
+<span class="sourceLineNo">507</span>      if (LOG.isTraceEnabled()) {<a name="line.507"></a>
+<span class="sourceLineNo">508</span>        LOG.trace("We were passed the defer-to-hdfs policy {}, exiting early.",<a name="line.508"></a>
+<span class="sourceLineNo">509</span>          trimmedStoragePolicy);<a name="line.509"></a>
+<span class="sourceLineNo">510</span>      }<a name="line.510"></a>
+<span class="sourceLineNo">511</span>      return;<a name="line.511"></a>
+<span class="sourceLineNo">512</span>    }<a name="line.512"></a>
+<span class="sourceLineNo">513</span>    try {<a name="line.513"></a>
+<span class="sourceLineNo">514</span>      invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);<a name="line.514"></a>
+<span class="sourceLineNo">515</span>    } catch (IOException e) {<a name="line.515"></a>
+<span class="sourceLineNo">516</span>      if (LOG.isTraceEnabled()) {<a name="line.516"></a>
+<span class="sourceLineNo">517</span>        LOG.trace("Failed to invoke set storage policy API on FS", e);<a name="line.517"></a>
+<span class="sourceLineNo">518</span>      }<a name="line.518"></a>
+<span class="sourceLineNo">519</span>      if (throwException) {<a name="line.519"></a>
+<span class="sourceLineNo">520</span>        throw e;<a name="line.520"></a>
 <span class="sourceLineNo">521</span>      }<a name="line.521"></a>
-<span class="sourceLineNo">522</span>      return;<a name="line.522"></a>
-<span class="sourceLineNo">523</span>    }<a name="line.523"></a>
-<span class="sourceLineNo">524</span>    invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);<a name="line.524"></a>
-<span class="sourceLineNo">525</span>  }<a name="line.525"></a>
-<span class="sourceLineNo">526</span><a name="line.526"></a>
-<span class="sourceLineNo">527</span>  /*<a name="line.527"></a>
-<span class="sourceLineNo">528</span>   * All args have been checked and are good. Run the setStoragePolicy invocation.<a name="line.528"></a>
-<span class="sourceLineNo">529</span>   */<a name="line.529"></a>
-<span class="sourceLineNo">530</span>  private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,<a name="line.530"></a>
-<span class="sourceLineNo">531</span>      final String storagePolicy) {<a name="line.531"></a>
-<span class="sourceLineNo">532</span>    Method m = null;<a name="line.532"></a>
-<span class="sourceLineNo">533</span>    try {<a name="line.533"></a>
-<span class="sourceLineNo">534</span>      m = fs.getClass().getDeclaredMethod("setStoragePolicy",<a name="line.534"></a>
-<span class="sourceLineNo">535</span>        new Class&lt;?&gt;[] { Path.class, String.class });<a name="line.535"></a>
-<span class="sourceLineNo">536</span>      m.setAccessible(true);<a name="line.536"></a>
-<span class="sourceLineNo">537</span>    } catch (NoSuchMethodException e) {<a name="line.537"></a>
+<span class="sourceLineNo">522</span>    }<a name="line.522"></a>
+<span class="sourceLineNo">523</span>  }<a name="line.523"></a>
+<span class="sourceLineNo">524</span><a name="line.524"></a>
+<span class="sourceLineNo">525</span>  /*<a name="line.525"></a>
+<span class="sourceLineNo">526</span>   * All args have been checked and are good. Run the setStoragePolicy invocation.<a name="line.526"></a>
+<span class="sourceLineNo">527</span>   */<a name="line.527"></a>
+<span class="sourceLineNo">528</span>  private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,<a name="line.528"></a>
+<span class="sourceLineNo">529</span>      final String storagePolicy) throws IOException {<a name="line.529"></a>
+<span class="sourceLineNo">530</span>    Method m = null;<a name="line.530"></a>
+<span class="sourceLineNo">531</span>    Exception toThrow = null;<a name="line.531"></a>
+<span class="sourceLineNo">532</span>    try {<a name="line.532"></a>
+<span class="sourceLineNo">533</span>      m = fs.getClass().getDeclaredMethod("setStoragePolicy",<a name="line.533"></a>
+<span class="sourceLineNo">534</span>        new Class&lt;?&gt;[] { Path.class, String.class });<a name="line.534"></a>
+<span class="sourceLineNo">535</span>      m.setAccessible(true);<a name="line.535"></a>
+<span class="sourceLineNo">536</span>    } catch (NoSuchMethodException e) {<a name="line.536"></a>
+<span class="sourceLineNo">537</span>      toThrow = e;<a name="line.537"></a>
 <span class="sourceLineNo">538</span>      final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584, HDFS-9345 " +<a name="line.538"></a>
 <span class="sourceLineNo">539</span>          "not available. This is normal and expected on earlier Hadoop versions.";<a name="line.539"></a>
 <span class="sourceLineNo">540</span>      if (!warningMap.containsKey(fs)) {<a name="line.540"></a>
@@ -553,357 +553,362 @@
 <span class="sourceLineNo">545</span>      }<a name="line.545"></a>
 <span class="sourceLineNo">546</span>      m = null;<a name="line.546"></a>
 <span class="sourceLineNo">547</span>    } catch (SecurityException e) {<a name="line.547"></a>
-<span class="sourceLineNo">548</span>      final String msg = "No access to setStoragePolicy on FileSystem from the SecurityManager; " +<a name="line.548"></a>
-<span class="sourceLineNo">549</span>          "HDFS-6584, HDFS-9345 not available. This is unusual and probably warrants an email " +<a name="line.549"></a>
-<span class="sourceLineNo">550</span>          "to the user@hbase mailing list. Please be sure to include a link to your configs, and " +<a name="line.550"></a>
-<span class="sourceLineNo">551</span>          "logs that include this message and period of time before it. Logs around service " +<a name="line.551"></a>
-<span class="sourceLineNo">552</span>          "start up will probably be useful as well.";<a name="line.552"></a>
-<span class="sourceLineNo">553</span>      if (!warningMap.containsKey(fs)) {<a name="line.553"></a>
-<span class="sourceLineNo">554</span>        warningMap.put(fs, true);<a name="line.554"></a>
-<span class="sourceLineNo">555</span>        LOG.warn(msg, e);<a name="line.555"></a>
-<span class="sourceLineNo">556</span>      } else if (LOG.isDebugEnabled()) {<a name="line.556"></a>
-<span class="sourceLineNo">557</span>        LOG.debug(msg, e);<a name="line.557"></a>
-<span class="sourceLineNo">558</span>      }<a name="line.558"></a>
-<span class="sourceLineNo">559</span>      m = null; // could happen on setAccessible() or getDeclaredMethod()<a name="line.559"></a>
-<span class="sourceLineNo">560</span>    }<a name="line.560"></a>
-<span class="sourceLineNo">561</span>    if (m != null) {<a name="line.561"></a>
-<span class="sourceLineNo">562</span>      try {<a name="line.562"></a>
-<span class="sourceLineNo">563</span>        m.invoke(fs, path, storagePolicy);<a name="line.563"></a>
-<span class="sourceLineNo">564</span>        if (LOG.isDebugEnabled()) {<a name="line.564"></a>
-<span class="sourceLineNo">565</span>          LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);<a name="line.565"></a>
-<span class="sourceLineNo">566</span>        }<a name="line.566"></a>
-<span class="sourceLineNo">567</span>      } catch (Exception e) {<a name="line.567"></a>
-<span class="sourceLineNo">568</span>        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev<a name="line.568"></a>
-<span class="sourceLineNo">569</span>        // misuse than a runtime problem with HDFS.<a name="line.569"></a>
-<span class="sourceLineNo">570</span>        if (!warningMap.containsKey(fs)) {<a name="line.570"></a>
-<span class="sourceLineNo">571</span>          warningMap.put(fs, true);<a name="line.571"></a>
-<span class="sourceLineNo">572</span>          LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " +<a name="line.572"></a>
-<span class="sourceLineNo">573</span>              "DEBUG log level might have more details.", e);<a name="line.573"></a>
-<span class="sourceLineNo">574</span>        } else if (LOG.isDebugEnabled()) {<a name="line.574"></a>
-<span class="sourceLineNo">575</span>          LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);<a name="line.575"></a>
-<span class="sourceLineNo">576</span>        }<a name="line.576"></a>
-<span class="sourceLineNo">577</span>        // check for lack of HDFS-7228<a name="line.577"></a>
-<span class="sourceLineNo">578</span>        if (e instanceof InvocationTargetException) {<a name="line.578"></a>
-<span class="sourceLineNo">579</span>          final Throwable exception = e.getCause();<a name="line.579"></a>
-<span class="sourceLineNo">580</span>          if (exception instanceof RemoteException &amp;&amp;<a name="line.580"></a>
-<span class="sourceLineNo">581</span>              HadoopIllegalArgumentException.class.getName().equals(<a name="line.581"></a>
-<span class="sourceLineNo">582</span>                ((RemoteException)exception).getClassName())) {<a name="line.582"></a>
-<span class="sourceLineNo">583</span>            if (LOG.isDebugEnabled()) {<a name="line.583"></a>
-<span class="sourceLineNo">584</span>              LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +<a name="line.584"></a>
-<span class="sourceLineNo">585</span>                "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +<a name="line.585"></a>
-<span class="sourceLineNo">586</span>                "trying to use SSD related policies then you're likely missing HDFS-7228. For " +<a name="line.586"></a>
-<span class="sourceLineNo">587</span>                "more information see the 'ArchivalStorage' docs for your Hadoop release.");<a name="line.587"></a>
-<span class="sourceLineNo">588</span>            }<a name="line.588"></a>
-<span class="sourceLineNo">589</span>          // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation<a name="line.589"></a>
-<span class="sourceLineNo">590</span>          // that throws UnsupportedOperationException<a name="line.590"></a>
-<span class="sourceLineNo">591</span>          } else if (exception instanceof UnsupportedOperationException) {<a name="line.591"></a>
-<span class="sourceLineNo">592</span>            if (LOG.isDebugEnabled()) {<a name="line.592"></a>
-<span class="sourceLineNo">593</span>              LOG.debug("The underlying FileSystem implementation doesn't support " +<a name="line.593"></a>
-<span class="sourceLineNo">594</span>                  "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 " +<a name="line.594"></a>
-<span class="sourceLineNo">595</span>                  "appears to be present in your version of Hadoop. For more information check " +<a name="line.595"></a>
-<span class="sourceLineNo">596</span>                  "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem " +<a name="line.596"></a>
-<span class="sourceLineNo">597</span>                  "specification docs from HADOOP-11981, and/or related documentation from the " +<a name="line.597"></a>
-<span class="sourceLineNo">598</span>                  "provider of the underlying FileSystem (its name should appear in the " +<a name="line.598"></a>
-<span class="sourceLineNo">599</span>                  "stacktrace that accompanies this message). Note in particular that Hadoop's " +<a name="line.599"></a>
-<span class="sourceLineNo">600</span>                  "local filesystem implementation doesn't support storage policies.", exception);<a name="line.600"></a>
-<span class="sourceLineNo">601</span>            }<a name="line.601"></a>
-<span class="sourceLineNo">602</span>          }<a name="line.602"></a>
-<span class="sourceLineNo">603</span>        }<a name="line.603"></a>
-<span class="sourceLineNo">604</span>      }<a name="line.604"></a>
-<span class="sourceLineNo">605</span>    }<a name="line.605"></a>
-<span class="sourceLineNo">606</span>  }<a name="line.606"></a>
-<span class="sourceLineNo">607</span><a name="line.607"></a>
-<span class="sourceLineNo">608</span>  /**<a name="line.608"></a>
-<span class="sourceLineNo">609</span>   * @param conf must not be null<a name="line.609"></a>
-<span class="sourceLineNo">610</span>   * @return True if this filesystem whose scheme is 'hdfs'.<a name="line.610"></a>
-<span class="sourceLineNo">611</span>   * @throws IOException from underlying FileSystem<a name="line.611"></a>
-<span class="sourceLineNo">612</span>   */<a name="line.612"></a>
-<span class="sourceLineNo">613</span>  public static boolean isHDFS(final Configuration conf) throws IOException {<a name="line.613"></a>
-<span class="sourceLineNo">614</span>    FileSystem fs = FileSystem.get(conf);<a name="line.614"></a>
-<span class="sourceLineNo">615</span>    String scheme = fs.getUri().getScheme();<a name="line.615"></a>
-<span class="sourceLineNo">616</span>    return scheme.equalsIgnoreCase("hdfs");<a name="line.616"></a>
-<span class="sourceLineNo">617</span>  }<a name="line.617"></a>
-<span class="sourceLineNo">618</span><a name="line.618"></a>
-<span class="sourceLineNo">619</span>  /**<a name="line.619"></a>
-<span class="sourceLineNo">620</span>   * Checks if the given path is the one with 'recovered.edits' dir.<a name="line.620"></a>
-<span class="sourceLineNo">621</span>   * @param path must not be null<a name="line.621"></a>
-<span class="sourceLineNo">622</span>   * @return True if we recovered edits<a name="line.622"></a>
-<span class="sourceLineNo">623</span>   */<a name="line.623"></a>
-<span class="sourceLineNo">624</span>  public static boolean isRecoveredEdits(Path path) {<a name="line.624"></a>
-<span class="sourceLineNo">625</span>    return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);<a name="line.625"></a>
-<span class="sourceLineNo">626</span>  }<a name="line.626"></a>
-<span class="sourceLineNo">627</span><a name="line.627"></a>
-<span class="sourceLineNo">628</span>  /**<a name="line.628"></a>
-<span class="sourceLineNo">629</span>   * @param conf must not be null<a name="line.629"></a>
-<span class="sourceLineNo">630</span>   * @return Returns the filesystem of the hbase rootdir.<a name="line.630"></a>
-<span class="sourceLineNo">631</span>   * @throws IOException from underlying FileSystem<a name="line.631"></a>
-<span class="sourceLineNo">632</span>   */<a name="line.632"></a>
-<span class="sourceLineNo">633</span>  public static FileSystem getCurrentFileSystem(Configuration conf)<a name="line.633"></a>
-<span class="sourceLineNo">634</span>  throws IOException {<a name="line.634"></a>
-<span class="sourceLineNo">635</span>    return getRootDir(conf).getFileSystem(conf);<a name="line.635"></a>
-<span class="sourceLineNo">636</span>  }<a name="line.636"></a>
-<span class="sourceLineNo">637</span><a name="line.637"></a>
-<span class="sourceLineNo">638</span>  /**<a name="line.638"></a>
-<span class="sourceLineNo">639</span>   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal<a name="line.639"></a>
-<span class="sourceLineNo">640</span>   * This accommodates differences between hadoop versions, where hadoop 1<a name="line.640"></a>
-<span class="sourceLineNo">641</span>   * does not throw a FileNotFoundException, and return an empty FileStatus[]<a name="line.641"></a>
-<span class="sourceLineNo">642</span>   * while Hadoop 2 will throw FileNotFoundException.<a name="line.642"></a>
-<span class="sourceLineNo">643</span>   *<a name="line.643"></a>
-<span class="sourceLineNo">644</span>   * Where possible, prefer FSUtils#listStatusWithStatusFilter(FileSystem,<a name="line.644"></a>
-<span class="sourceLineNo">645</span>   * Path, FileStatusFilter) instead.<a name="line.645"></a>
-<span class="sourceLineNo">646</span>   *<a name="line.646"></a>
-<span class="sourceLineNo">647</span>   * @param fs file system<a name="line.647"></a>
-<span class="sourceLineNo">648</span>   * @param dir directory<a name="line.648"></a>
-<span class="sourceLineNo">649</span>   * @param filter path filter<a name="line.649"></a>
-<span class="sourceLineNo">650</span>   * @return null if dir is empty or doesn't exist, otherwise FileStatus array<a name="line.650"></a>
-<span class="sourceLineNo">651</span>   */<a name="line.651"></a>
-<span class="sourceLineNo">652</span>  public static FileStatus [] listStatus(final FileSystem fs,<a name="line.652"></a>
-<span class="sourceLineNo">653</span>      final Path dir, final PathFilter filter) throws IOException {<a name="line.653"></a>
-<span class="sourceLineNo">654</span>    FileStatus [] status = null;<a name="line.654"></a>
-<span class="sourceLineNo">655</span>    try {<a name="line.655"></a>
-<span class="sourceLineNo">656</span>      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);<a name="line.656"></a>
-<span class="sourceLineNo">657</span>    } catch (FileNotFoundException fnfe) {<a name="line.657"></a>
-<span class="sourceLineNo">658</span>      // if directory doesn't exist, return null<a name="line.658"></a>
-<span class="sourceLineNo">659</span>      if (LOG.isTraceEnabled()) {<a name="line.659"></a>
-<span class="sourceLineNo">660</span>        LOG.trace(dir + " doesn't exist");<a name="line.660"></a>
-<span class="sourceLineNo">661</span>      }<a name="line.661"></a>
-<span class="sourceLineNo">662</span>    }<a name="line.662"></a>
-<span class="sourceLineNo">663</span>    if (status == null || status.length &lt; 1) {<a name="line.663"></a>
-<span class="sourceLineNo">664</span>      return null;<a name="line.664"></a>
-<span class="sourceLineNo">665</span>    }<a name="line.665"></a>
-<span class="sourceLineNo">666</span>    return status;<a name="line.666"></a>
-<span class="sourceLineNo">667</span>  }<a name="line.667"></a>
-<span class="sourceLineNo">668</span><a name="line.668"></a>
-<span class="sourceLineNo">669</span>  /**<a name="line.669"></a>
-<span class="sourceLineNo">670</span>   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal<a name="line.670"></a>
-<span class="sourceLineNo">671</span>   * This would accommodates differences between hadoop versions<a name="line.671"></a>
-<span class="sourceLineNo">672</span>   *<a name="line.672"></a>
-<span class="sourceLineNo">673</span>   * @param fs file system<a name="line.673"></a>
-<span class="sourceLineNo">674</span>   * @param dir directory<a name="line.674"></a>
-<span class="sourceLineNo">675</span>   * @return null if dir is empty or doesn't exist, otherwise FileStatus array<a name="line.675"></a>
-<span class="sourceLineNo">676</span>   */<a name="line.676"></a>
-<span class="sourceLineNo">677</span>  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {<a name="line.677"></a>
-<span class="sourceLineNo">678</span>    return listStatus(fs, dir, null);<a name="line.678"></a>
-<span class="sourceLineNo">679</span>  }<a name="line.679"></a>
-<span class="sourceLineNo">680</span><a name="line.680"></a>
-<span class="sourceLineNo">681</span>  /**<a name="line.681"></a>
-<span class="sourceLineNo">682</span>   * Calls fs.listFiles() to get FileStatus and BlockLocations together for reducing rpc call<a name="line.682"></a>
-<span class="sourceLineNo">683</span>   *<a name="line.683"></a>
-<span class="sourceLineNo">684</span>   * @param fs file system<a name="line.684"></a>
-<span class="sourceLineNo">685</span>   * @param dir directory<a name="line.685"></a>
-<span class="sourceLineNo">686</span>   * @return LocatedFileStatus list<a name="line.686"></a>
-<span class="sourceLineNo">687</span>   */<a name="line.687"></a>
-<span class="sourceLineNo">688</span>  public static List&lt;LocatedFileStatus&gt; listLocatedStatus(final FileSystem fs,<a name="line.688"></a>
-<span class="sourceLineNo">689</span>      final Path dir) throws IOException {<a name="line.689"></a>
-<span class="sourceLineNo">690</span>    List&lt;LocatedFileStatus&gt; status = null;<a name="line.690"></a>
-<span class="sourceLineNo">691</span>    try {<a name="line.691"></a>
-<span class="sourceLineNo">692</span>      RemoteIterator&lt;LocatedFileStatus&gt; locatedFileStatusRemoteIterator = fs<a name="line.692"></a>
-<span class="sourceLineNo">693</span>          .listFiles(dir, false);<a name="line.693"></a>
-<span class="sourceLineNo">694</span>      while (locatedFileStatusRemoteIterator.hasNext()) {<a name="line.694"></a>
-<span class="sourceLineNo">695</span>        if (status == null) {<a name="line.695"></a>
-<span class="sourceLineNo">696</span>          status = Lists.newArrayList();<a name="line.696"></a>
-<span class="sourceLineNo">697</span>        }<a name="line.697"></a>
-<span class="sourceLineNo">698</span>        status.add(locatedFileStatusRemoteIterator.next());<a name="line.698"></a>
-<span class="sourceLineNo">699</span>      }<a name="line.699"></a>
-<span class="sourceLineNo">700</span>    } catch (FileNotFoundException fnfe) {<a name="line.700"></a>
-<span class="sourceLineNo">701</span>      // if directory doesn't exist, return null<a name="line.701"></a>
-<span class="sourceLineNo">702</span>      if (LOG.isTraceEnabled()) {<a name="line.702"></a>
-<span class="sourceLineNo">703</span>        LOG.trace(dir + " doesn't exist");<a name="line.703"></a>
+<span class="sourceLineNo">548</span>      toThrow = e;<a name="line.548"></a>
+<span class="sourceLineNo">549</span>      final String msg = "No access to setStoragePolicy on FileSystem from the SecurityManager; " +<a name="line.549"></a>
+<span class="sourceLineNo">550</span>          "HDFS-6584, HDFS-9345 not available. This is unusual and probably warrants an email " +<a name="line.550"></a>
+<span class="sourceLineNo">551</span>          "to the user@hbase mailing list. Please be sure to include a link to your configs, and " +<a name="line.551"></a>
+<span class="sourceLineNo">552</span>          "logs that include this message and period of time before it. Logs around service " +<a name="line.552"></a>
+<span class="sourceLineNo">553</span>          "start up will probably be useful as well.";<a name="line.553"></a>
+<span class="sourceLineNo">554</span>      if (!warningMap.containsKey(fs)) {<a name="line.554"></a>
+<span class="sourceLineNo">555</span>        warningMap.put(fs, true);<a name="line.555"></a>
+<span class="sourceLineNo">556</span>        LOG.warn(msg, e);<a name="line.556"></a>
+<span class="sourceLineNo">557</span>      } else if (LOG.isDebugEnabled()) {<a name="line.557"></a>
+<span class="sourceLineNo">558</span>        LOG.debug(msg, e);<a name="line.558"></a>
+<span class="sourceLineNo">559</span>      }<a name="line.559"></a>
+<span class="sourceLineNo">560</span>      m = null; // could happen on setAccessible() or getDeclaredMethod()<a name="line.560"></a>
+<span class="sourceLineNo">561</span>    }<a name="line.561"></a>
+<span class="sourceLineNo">562</span>    if (m != null) {<a name="line.562"></a>
+<span class="sourceLineNo">563</span>      try {<a name="line.563"></a>
+<span class="sourceLineNo">564</span>        m.invoke(fs, path, storagePolicy);<a name="line.564"></a>
+<span class="sourceLineNo">565</span>        if (LOG.isDebugEnabled()) {<a name="line.565"></a>
+<span class="sourceLineNo">566</span>          LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);<a name="line.566"></a>
+<span class="sourceLineNo">567</span>        }<a name="line.567"></a>
+<span class="sourceLineNo">568</span>      } catch (Exception e) {<a name="line.568"></a>
+<span class="sourceLineNo">569</span>        toThrow = e;<a name="line.569"></a>
+<span class="sourceLineNo">570</span>        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev<a name="line.570"></a>
+<span class="sourceLineNo">571</span>        // misuse than a runtime problem with HDFS.<a name="line.571"></a>
+<span class="sourceLineNo">572</span>        if (!warningMap.containsKey(fs)) {<a name="line.572"></a>
+<span class="sourceLineNo">573</span>          warningMap.put(fs, true);<a name="line.573"></a>
+<span class="sourceLineNo">574</span>          LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " +<a name="line.574"></a>
+<span class="sourceLineNo">575</span>              "DEBUG log level might have more details.", e);<a name="line.575"></a>
+<span class="sourceLineNo">576</span>        } else if (LOG.isDebugEnabled()) {<a name="line.576"></a>
+<span class="sourceLineNo">577</span>          LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);<a name="line.577"></a>
+<span class="sourceLineNo">578</span>        }<a name="line.578"></a>
+<span class="sourceLineNo">579</span>        // check for lack of HDFS-7228<a name="line.579"></a>
+<span class="sourceLineNo">580</span>        if (e instanceof InvocationTargetException) {<a name="line.580"></a>
+<span class="sourceLineNo">581</span>          final Throwable exception = e.getCause();<a name="line.581"></a>
+<span class="sourceLineNo">582</span>          if (exception instanceof RemoteException &amp;&amp;<a name="line.582"></a>
+<span class="sourceLineNo">583</span>              HadoopIllegalArgumentException.class.getName().equals(<a name="line.583"></a>
+<span class="sourceLineNo">584</span>                ((RemoteException)exception).getClassName())) {<a name="line.584"></a>
+<span class="sourceLineNo">585</span>            if (LOG.isDebugEnabled()) {<a name="line.585"></a>
+<span class="sourceLineNo">586</span>              LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +<a name="line.586"></a>
+<span class="sourceLineNo">587</span>                "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +<a name="line.587"></a>
+<span class="sourceLineNo">588</span>                "trying to use SSD related policies then you're likely missing HDFS-7228. For " +<a name="line.588"></a>
+<span class="sourceLineNo">589</span>                "more information see the 'ArchivalStorage' docs for your Hadoop release.");<a name="line.589"></a>
+<span class="sourceLineNo">590</span>            }<a name="line.590"></a>
+<span class="sourceLineNo">591</span>          // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation<a name="line.591"></a>
+<span class="sourceLineNo">592</span>          // that throws UnsupportedOperationException<a name="line.592"></a>
+<span class="sourceLineNo">593</span>          } else if (exception instanceof UnsupportedOperationException) {<a name="line.593"></a>
+<span class="sourceLineNo">594</span>            if (LOG.isDebugEnabled()) {<a name="line.594"></a>
+<span class="sourceLineNo">595</span>              LOG.debug("The underlying FileSystem implementation doesn't support " +<a name="line.595"></a>
+<span class="sourceLineNo">596</span>                  "setStoragePolicy. This is probably intentional on their part, since HDFS-9345 " +<a name="line.596"></a>
+<span class="sourceLineNo">597</span>                  "appears to be present in your version of Hadoop. For more information check " +<a name="line.597"></a>
+<span class="sourceLineNo">598</span>                  "the Hadoop documentation on 'ArchivalStorage', the Hadoop FileSystem " +<a name="line.598"></a>
+<span class="sourceLineNo">599</span>                  "specification docs from HADOOP-11981, and/or related documentation from the " +<a name="line.599"></a>
+<span class="sourceLineNo">600</span>                  "provider of the underlying FileSystem (its name should appear in the " +<a name="line.600"></a>
+<span class="sourceLineNo">601</span>                  "stacktrace that accompanies this message). Note in particular that Hadoop's " +<a name="line.601"></a>
+<span class="sourceLineNo">602</span>                  "local filesystem implementation doesn't support storage policies.", exception);<a name="line.602"></a>
+<span class="sourceLineNo">603</span>            }<a name="line.603"></a>
+<span class="sourceLineNo">604</span>          }<a name="line.604"></a>
+<span class="sourceLineNo">605</span>        }<a name="line.605"></a>
+<span class="sourceLineNo">606</span>      }<a name="line.606"></a>
+<span class="sourceLineNo">607</span>    }<a name="line.607"></a>
+<span class="sourceLineNo">608</span>    if (toThrow != null) {<a name="line.608"></a>
+<span class="sourceLineNo">609</span>      throw new IOException(toThrow);<a name="line.609"></a>
+<span class="sourceLineNo">610</span>    }<a name="line.610"></a>
+<span class="sourceLineNo">611</span>  }<a name="line.611"></a>
+<span class="sourceLineNo">612</span><a name="line.612"></a>
+<span class="sourceLineNo">613</span>  /**<a name="line.613"></a>
+<span class="sourceLineNo">614</span>   * @param conf must not be null<a name="line.614"></a>
+<span class="sourceLineNo">615</span>   * @return True if this filesystem whose scheme is 'hdfs'.<a name="line.615"></a>
+<span class="sourceLineNo">616</span>   * @throws IOException from underlying FileSystem<a name="line.616"></a>
+<span class="sourceLineNo">617</span>   */<a name="line.617"></a>
+<span class="sourceLineNo">618</span>  public static boolean isHDFS(final Configuration conf) throws IOException {<a name="line.618"></a>
+<span class="sourceLineNo">619</span>    FileSystem fs = FileSystem.get(conf);<a name="line.619"></a>
+<span class="sourceLineNo">620</span>    String scheme = fs.getUri().getScheme();<a name="line.620"></a>
+<span class="sourceLineNo">621</span>    return scheme.equalsIgnoreCase("hdfs");<a name="line.621"></a>
+<span class="sourceLineNo">622</span>  }<a name="line.622"></a>
+<span class="sourceLineNo">623</span><a name="line.623"></a>
+<span class="sourceLineNo">624</span>  /**<a name="line.624"></a>
+<span class="sourceLineNo">625</span>   * Checks if the given path is the one with 'recovered.edits' dir.<a name="line.625"></a>
+<span class="sourceLineNo">626</span>   * @param path must not be null<a name="line.626"></a>
+<span class="sourceLineNo">627</span>   * @return True if we recovered edits<a name="line.627"></a>
+<span class="sourceLineNo">628</span>   */<a name="line.628"></a>
+<span class="sourceLineNo">629</span>  public static boolean isRecoveredEdits(Path path) {<a name="line.629"></a>
+<span class="sourceLineNo">630</span>    return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);<a name="line.630"></a>
+<span class="sourceLineNo">631</span>  }<a name="line.631"></a>
+<span class="sourceLineNo">632</span><a name="line.632"></a>
+<span class="sourceLineNo">633</span>  /**<a name="line.633"></a>
+<span class="sourceLineNo">634</span>   * @param conf must not be null<a name="line.634"></a>
+<span class="sourceLineNo">635</span>   * @return Returns the filesystem of the hbase rootdir.<a name="line.635"></a>
+<span class="sourceLineNo">636</span>   * @throws IOException from underlying FileSystem<a name="line.636"></a>
+<span class="sourceLineNo">637</span>   */<a name="line.637"></a>
+<span class="sourceLineNo">638</span>  public static FileSystem getCurrentFileSystem(Configuration conf)<a name="line.638"></a>
+<span class="sourceLineNo">639</span>  throws IOException {<a name="line.639"></a>
+<span class="sourceLineNo">640</span>    return getRootDir(conf).getFileSystem(conf);<a name="line.640"></a>
+<span class="sourceLineNo">641</span>  }<a name="line.641"></a>
+<span class="sourceLineNo">642</span><a name="line.642"></a>
+<span class="sourceLineNo">643</span>  /**<a name="line.643"></a>
+<span class="sourceLineNo">644</span>   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal<a name="line.644"></a>
+<span class="sourceLineNo">645</span>   * This accommodates differences between hadoop versions, where hadoop 1<a name="line.645"></a>
+<span class="sourceLineNo">646</span>   * does not throw a FileNotFoundException, and return an empty FileStatus[]<a name="line.646"></a>
+<span class="sourceLineNo">647</span>   * while Hadoop 2 will throw FileNotFoundException.<a name="line.647"></a>
+<span class="sourceLineNo">648</span>   *<a name="line.648"></a>
+<span class="sourceLineNo">649</span>   * Where possible, prefer FSUtils#listStatusWithStatusFilter(FileSystem,<a name="line.649"></a>
+<span class="sourceLineNo">650</span>   * Path, FileStatusFilter) instead.<a name="line.650"></a>
+<span class="sourceLineNo">651</span>   *<a name="line.651"></a>
+<span class="sourceLineNo">652</span>   * @param fs file system<a name="line.652"></a>
+<span class="sourceLineNo">653</span>   * @param dir directory<a name="line.653"></a>
+<span class="sourceLineNo">654</span>   * @param filter path filter<a name="line.654"></a>
+<span class="sourceLineNo">655</span>   * @return null if dir is empty or doesn't exist, otherwise FileStatus array<a name="line.655"></a>
+<span class="sourceLineNo">656</span>   */<a name="line.656"></a>
+<span class="sourceLineNo">657</span>  public static FileStatus [] listStatus(final FileSystem fs,<a name="line.657"></a>
+<span class="sourceLineNo">658</span>      final Path dir, final PathFilter filter) throws IOException {<a name="line.658"></a>
+<span class="sourceLineNo">659</span>    FileStatus [] status = null;<a name="line.659"></a>
+<span class="sourceLineNo">660</span>    try {<a name="line.660"></a>
+<span class="sourceLineNo">661</span>      status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);<a name="line.661"></a>
+<span class="sourceLineNo">662</span>    } catch (FileNotFoundException fnfe) {<a name="line.662"></a>
+<span class="sourceLineNo">663</span>      // if directory doesn't exist, return null<a name="line.663"></a>
+<span class="sourceLineNo">664</span>      if (LOG.isTraceEnabled()) {<a name="line.664"></a>
+<span class="sourceLineNo">665</span>        LOG.trace(dir + " doesn't exist");<a name="line.665"></a>
+<span class="sourceLineNo">666</span>      }<a name="line.666"></a>
+<span class="sourceLineNo">667</span>    }<a name="line.667"></a>
+<span class="sourceLineNo">668</span>    if (status == null || status.length &lt; 1) {<a name="line.668"></a>
+<span class="sourceLineNo">669</span>      return null;<a name="line.669"></a>
+<span class="sourceLineNo">670</span>    }<a name="line.670"></a>
+<span class="sourceLineNo">671</span>    return status;<a name="line.671"></a>
+<span class="sourceLineNo">672</span>  }<a name="line.672"></a>
+<span class="sourceLineNo">673</span><a name="line.673"></a>
+<span class="sourceLineNo">674</span>  /**<a name="line.674"></a>
+<span class="sourceLineNo">675</span>   * Calls fs.listStatus() and treats FileNotFoundException as non-fatal<a name="line.675"></a>
+<span class="sourceLineNo">676</span>   * This would accommodates differences between hadoop versions<a name="line.676"></a>
+<span class="sourceLineNo">677</span>   *<a name="line.677"></a>
+<span class="sourceLineNo">678</span>   * @param fs file system<a name="line.678"></a>
+<span class="sourceLineNo">679</span>   * @param dir directory<a name="line.679"></a>
+<span class="sourceLineNo">680</span>   * @return null if dir is empty or doesn't exist, otherwise FileStatus array<a name="line.680"></a>
+<span class="sourceLineNo">681</span>   */<a name="line.681"></a>
+<span class="sourceLineNo">682</span>  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {<a name="line.682"></a>
+<span class="sourceLineNo">683</span>    return listStatus(fs, dir, null);<a name="line.683"></a>
+<span class="sourceLineNo">684</span>  }<a name="line.684"></a>
+<span class="sourceLineNo">685</span><a name="line.685"></a>
+<span class="sourceLineNo">686</span>  /**<a name="line.686"></a>
+<span class="sourceLineNo">687</span>   * Calls fs.listFiles() to get FileStatus and BlockLocations together for reducing rpc call<a name="line.687"></a>
+<span class="sourceLineNo">688</span>   *<a name="line.688"></a>
+<span class="sourceLineNo">689</span>   * @param fs file system<a name="line.689"></a>
+<span class="sourceLineNo">690</span>   * @param dir directory<a name="line.690"></a>
+<span class="sourceLineNo">691</span>   * @return LocatedFileStatus list<a name="line.691"></a>
+<span class="sourceLineNo">692</span>   */<a name="line.692"></a>
+<span class="sourceLineNo">693</span>  public static List&lt;LocatedFileStatus&gt; listLocatedStatus(final FileSystem fs,<a name="line.693"></a>
+<span class="sourceLineNo">694</span>      final Path dir) throws IOException {<a name="line.694"></a>
+<span class="sourceLineNo">695</span>    List&lt;LocatedFileStatus&gt; status = null;<a name="line.695"></a>
+<span class="sourceLineNo">696</span>    try {<a name="line.696"></a>
+<span class="sourceLineNo">697</span>      RemoteIterator&lt;LocatedFileStatus&gt; locatedFileStatusRemoteIterator = fs<a name="line.697"></a>
+<span class="sourceLineNo">698</span>          .listFiles(dir, false);<a name="line.698"></a>
+<span class="sourceLineNo">699</span>      while (locatedFileStatusRemoteIterator.hasNext()) {<a name="line.699"></a>
+<span class="sourceLineNo">700</span>        if (status == null) {<a name="line.700"></a>
+<span class="sourceLineNo">701</span>          status = Lists.newArrayList();<a name="line.701"></a>
+<span class="sourceLineNo">702</span>        }<a name="line.702"></a>
+<span class="sourceLineNo">703</span>        status.add(locatedFileStatusRemoteIterator.next());<a name="line.703"></a>
 <span class="sourceLineNo">704</span>      }<a name="line.704"></a>
-<span class="sourceLineNo">705</span>    }<a name="line.705"></a>
-<span class="sourceLineNo">706</span>    return status;<a name="line.706"></a>
-<span class="sourceLineNo">707</span>  }<a name="line.707"></a>
-<span class="sourceLineNo">708</span><a name="line.708"></a>
-<span class="sourceLineNo">709</span>  /**<a name="line.709"></a>
-<span class="sourceLineNo">710</span>   * Calls fs.delete() and returns the value returned by the fs.delete()<a name="line.710"></a>
-<span class="sourceLineNo">711</span>   *<a name="line.711"></a>
-<span class="sourceLineNo">712</span>   * @param fs must not be null<a name="line.712"></a>
-<span class="sourceLineNo">713</span>   * @param path must not be null<a name="line.713"></a>
-<span class="sourceLineNo">714</span>   * @param recursive delete tree rooted at path<a name="line.714"></a>
-<span class="sourceLineNo">715</span>   * @return the value returned by the fs.delete()<a name="line.715"></a>
-<span class="sourceLineNo">716</span>   * @throws IOException from underlying FileSystem<a name="line.716"></a>
-<span class="sourceLineNo">717</span>   */<a name="line.717"></a>
-<span class="sourceLineNo">718</span>  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)<a name="line.718"></a>
-<span class="sourceLineNo">719</span>      throws IOException {<a name="line.719"></a>
-<span class="sourceLineNo">720</span>    return fs.delete(path, recursive);<a name="line.720"></a>
-<span class="sourceLineNo">721</span>  }<a name="line.721"></a>
-<span class="sourceLineNo">722</span><a name="line.722"></a>
-<span class="sourceLineNo">723</span>  /**<a name="line.723"></a>
-<span class="sourceLineNo">724</span>   * Calls fs.exists(). Checks if the specified path exists<a name="line.724"></a>
-<span class="sourceLineNo">725</span>   *<a name="line.725"></a>
-<span class="sourceLineNo">726</span>   * @param fs must not be null<a name="line.726"></a>
-<span class="sourceLineNo">727</span>   * @param path must not be null<a name="line.727"></a>
-<span class="sourceLineNo">728</span>   * @return the value returned by fs.exists()<a name="line.728"></a>
-<span class="sourceLineNo">729</span>   * @throws IOException from underlying FileSystem<a name="line.729"></a>
-<span class="sourceLineNo">730</span>   */<a name="line.730"></a>
-<span class="sourceLineNo">731</span>  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {<a name="line.731"></a>
-<span class="sourceLineNo">732</span>    return fs.exists(path);<a name="line.732"></a>
-<span class="sourceLineNo">733</span>  }<a name="line.733"></a>
-<span class="sourceLineNo">734</span><a name="line.734"></a>
-<span class="sourceLineNo">735</span>  /**<a name="line.735"></a>
-<span class="sourceLineNo">736</span>   * Log the current state of the filesystem from a certain root directory<a name="line.736"></a>
-<span class="sourceLineNo">737</span>   * @param fs filesystem to investigate<a name="line.737"></a>
-<span class="sourceLineNo">738</span>   * @param root root file/directory to start logging from<a name="line.738"></a>
-<span class="sourceLineNo">739</span>   * @param LOG log to output information<a name="line.739"></a>
-<span class="sourceLineNo">740</span>   * @throws IOException if an unexpected exception occurs<a name="line.740"></a>
-<span class="sourceLineNo">741</span>   */<a name="line.741"></a>
-<span class="sourceLineNo">742</span>  public static void logFileSystemState(final FileSystem fs, final Path root, Logger LOG)<a name="line.742"></a>
-<span class="sourceLineNo">743</span>      throws IOException {<a name="line.743"></a>
-<span class="sourceLineNo">744</span>    LOG.debug("File system contents for path " + root);<a name="line.744"></a>
-<span class="sourceLineNo">745</span>    logFSTree(LOG, fs, root, "|-");<a name="line.745"></a>
-<span class="sourceLineNo">746</span>  }<a name="line.746"></a>
-<span class="sourceLineNo">747</span><a name="line.747"></a>
-<span class="sourceLineNo">748</span>  /**<a name="line.748"></a>
-<span class="sourceLineNo">749</span>   * Recursive helper to log the state of the FS<a name="line.749"></a>
-<span class="sourceLineNo">750</span>   *<a name="line.750"></a>
-<span class="sourceLineNo">751</span>   * @see #logFileSystemState(FileSystem, Path, Logger)<a name="line.751"></a>
-<span class="sourceLineNo">752</span>   */<a name="line.752"></a>
-<span class="sourceLineNo">753</span>  private static void logFSTree(Logger LOG, final FileSystem fs, final Path root, String prefix)<a name="line.753"></a>
-<span class="sourceLineNo">754</span>      throws IOException {<a name="line.754"></a>
-<span class="sourceLineNo">755</span>    FileStatus[] files = listStatus(fs, root, null);<a name="line.755"></a>
-<span class="sourceLineNo">756</span>    if (files == null) {<a name="line.756"></a>
-<span class="sourceLineNo">757</span>      return;<a name="line.757"></a>
-<span class="sourceLineNo">758</span>    }<a name="line.758"></a>
-<span class="sourceLineNo">759</span><a name="line.759"></a>
-<span class="sourceLineNo">760</span>    for (FileStatus file : files) {<a name="line.760"></a>
-<span class="sourceLineNo">761</span>      if (file.isDirectory()) {<a name="line.761"></a>
-<span class="sourceLineNo">762</span>        LOG.debug(prefix + file.getPath().getName() + "/");<a name="line.762"></a>
-<span class="sourceLineNo">763</span>        logFSTree(LOG, fs, file.getPath(), prefix + "---");<a name="line.763"></a>
-<span class="sourceLineNo">764</span>      } else {<a name="line.764"></a>
-<span class="sourceLineNo">765</span>        LOG.debug(prefix + file.getPath().getName());<a name="line.765"></a>
-<span class="sourceLineNo">766</span>      }<a name="line.766"></a>
-<span class="sourceLineNo">767</span>    }<a name="line.767"></a>
-<span class="sourceLineNo">768</span>  }<a name="line.768"></a>
-<span class="sourceLineNo">769</span><a name="line.769"></a>
-<span class="sourceLineNo">770</span>  public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)<a name="line.770"></a>
-<span class="sourceLineNo">771</span>      throws IOException {<a name="line.771"></a>
-<span class="sourceLineNo">772</span>    // set the modify time for TimeToLive Cleaner<a name="line.772"></a>
-<span class="sourceLineNo">773</span>    fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);<a name="line.773"></a>
-<span class="sourceLineNo">774</span>    return fs.rename(src, dest);<a name="line.774"></a>
-<span class="sourceLineNo">775</span>  }<a name="line.775"></a>
-<span class="sourceLineNo">776</span><a name="line.776"></a>
-<span class="sourceLineNo">777</span>  /**<a name="line.777"></a>
-<span class="sourceLineNo">778</span>   * Do our short circuit read setup.<a name="line.778"></a>
-<span class="sourceLineNo">779</span>   * Checks buffer size to use and whether to do checksumming in hbase or hdfs.<a name="line.779"></a>
-<span class="sourceLineNo">780</span>   * @param conf must not be null<a name="line.780"></a>
-<span class="sourceLineNo">781</span>   */<a name="line.781"></a>
-<span class="sourceLineNo">782</span>  public static void setupShortCircuitRead(final Configuration conf) {<a name="line.782"></a>
-<span class="sourceLineNo">783</span>    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.<a name="line.783"></a>
-<span class="sourceLineNo">784</span>    boolean shortCircuitSkipChecksum =<a name="line.784"></a>
-<span class="sourceLineNo">785</span>      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);<a name="line.785"></a>
-<span class="sourceLineNo">786</span>    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);<a name="line.786"></a>
-<span class="sourceLineNo">787</span>    if (shortCircuitSkipChecksum) {<a name="line.787"></a>
-<span class="sourceLineNo">788</span>      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +<a name="line.788"></a>
-<span class="sourceLineNo">789</span>        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +<a name="line.789"></a>
-<span class="sourceLineNo">790</span>        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));<a name="line.790"></a>
-<span class="sourceLineNo">791</span>      assert !shortCircuitSkipChecksum; //this will fail if assertions are on<a name="line.791"></a>
-<span class="sourceLineNo">792</span>    }<a name="line.792"></a>
-<span class="sourceLineNo">793</span>    checkShortCircuitReadBufferSize(conf);<a name="line.793"></a>
-<span class="sourceLineNo">794</span>  }<a name="line.794"></a>
-<span class="sourceLineNo">795</span><a name="line.795"></a>
-<span class="sourceLineNo">796</span>  /**<a name="line.796"></a>
-<span class="sourceLineNo">797</span>   * Check if short circuit read buffer size is set and if not, set it to hbase value.<a name="line.797"></a>
-<span class="sourceLineNo">798</span>   * @param conf must not be null<a name="line.798"></a>
-<span class="sourceLineNo">799</span>   */<a name="line.799"></a>
-<span class="sourceLineNo">800</span>  public static void checkShortCircuitReadBufferSize(final Configuration conf) {<a name="line.800"></a>
-<span class="sourceLineNo">801</span>    final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;<a name="line.801"></a>
-<span class="sourceLineNo">802</span>    final int notSet = -1;<a name="line.802"></a>
-<span class="sourceLineNo">803</span>    // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2<a name="line.803"></a>
-<span class="sourceLineNo">804</span>    final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";<a name="line.804"></a>
-<span class="sourceLineNo">805</span>    int size = conf.getInt(dfsKey, notSet);<a name="line.805"></a>
-<span class="sourceLineNo">806</span>    // If a size is set, return -- we will use it.<a name="line.806"></a>
-<span class="sourceLineNo">807</span>    if (size != notSet) {<a name="line.807"></a>
-<span class="sourceLineNo">808</span>      return;<a name="line.808"></a>
-<span class="sourceLineNo">809</span>    }<a name="line.809"></a>
-<span class="sourceLineNo">810</span>    // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.<a name="line.810"></a>
-<span class="sourceLineNo">811</span>    int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);<a name="line.811"></a>
-<span class="sourceLineNo">812</span>    conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));<a name="line.812"></a>
-<span class="sourceLineNo">813</span>  }<a name="line.813"></a>
-<span class="sourceLineNo">814</span><a name="line.814"></a>
-<span class="sourceLineNo">815</span>  // Holder singleton idiom. JVM spec ensures this will be run at most once per Classloader, and<a name="line.815"></a>
-<span class="sourceLineNo">816</span>  // not until we attempt to reference it.<a name="line.816"></a>
-<span class="sourceLineNo">817</span>  private static class StreamCapabilities {<a name="line.817"></a>
-<span class="sourceLineNo">818</span>    public static final boolean PRESENT;<a name="line.818"></a>
-<span class="sourceLineNo">819</span>    public static final Class&lt;?&gt; CLASS;<a name="line.819"></a>
-<span class="sourceLineNo">820</span>    public static final Method METHOD;<a name="line.820"></a>
-<span class="sourceLineNo">821</span>    static {<a name="line.821"></a>
-<span class="sourceLineNo">822</span>      boolean tmp = false;<a name="line.822"></a>
-<span class="sourceLineNo">823</span>      Class&lt;?&gt; clazz = null;<a name="line.823"></a>
-<span class="sourceLineNo">824</span>      Method method = null;<a name="line.824"></a>
-<span class="sourceLineNo">825</span>      try {<a name="line.825"></a>
-<span class="sourceLineNo">826</span>        clazz = Class.forName("org.apache.hadoop.fs.StreamCapabilities");<a name="line.826"></a>
-<span class="sourceLineNo">827</span>        method = clazz.getMethod("hasCapability", String.class);<a name="line.827"></a>
-<span class="sourceLineNo">828</span>        tmp = true;<a name="line.828"></a>
-<span class="sourceLineNo">829</span>      } catch(ClassNotFoundException|NoSuchMethodException|SecurityException exception) {<a name="line.829"></a>
-<span class="sourceLineNo">830</span>        LOG.warn("Your Hadoop installation does not include the StreamCapabilities class from " +<a name="line.830"></a>
-<span class="sourceLineNo">831</span>                 "HDFS-11644, so we will skip checking if any FSDataOutputStreams actually " +<a name="line.831"></a>
-<span class="sourceLineNo">832</span>                 "support hflush/hsync. If you are running on top of HDFS this probably just " +<a name="line.832"></a>
-<span class="sourceLineNo">833</span>                 "means you have an older version and this can be ignored. If you are running on " +<a name="line.833"></a>
-<span class="sourceLineNo">834</span>                 "top of an alternate FileSystem implementation you should manually verify that " +<a name="line.834"></a>
-<span class="sourceLineNo">835</span>                 "hflush and hsync are implemented; otherwise you risk data loss and hard to " +<a name="line.835"></a>
-<span class="sourceLineNo">836</span>                 "diagnose errors when our assumptions are violated.");<a name="line.836"></a>
-<span class="sourceLineNo">837</span>        LOG.debug("The first request to check for StreamCapabilities came from this stacktrace.",<a name="line.837"></a>
-<span class="sourceLineNo">838</span>            exception);<a name="line.838"></a>
-<span class="sourceLineNo">839</span>      } finally {<a name="line.839"></a>
-<span class="sourceLineNo">840</span>        PRESENT = tmp;<a name="line.840"></a>
-<span class="sourceLineNo">841</span>        CLASS = clazz;<a name="line.841"></a>
-<span class="sourceLineNo">842</span>        METHOD = method;<a name="line.842"></a>
-<span class="sourceLineNo">843</span>      }<a name="line.843"></a>
-<span class="sourceLineNo">844</span>    }<a name="line.844"></a>
-<span class="sourceLineNo">845</span>  }<a name="line.845"></a>
-<span class="sourceLineNo">846</span><a name="line.846"></a>
-<span class="sourceLineNo">847</span>  /**<a name="line.847"></a>
-<span class="sourceLineNo">848</span>   * If our FileSystem version includes the StreamCapabilities class, check if<a name="line.848"></a>
-<span class="sourceLineNo">849</span>   * the given stream has a particular capability.<a name="line.849"></a>
-<span class="sourceLineNo">850</span>   * @param stream capabilities are per-stream instance, so check this one specifically. must not be<a name="line.850"></a>
-<span class="sourceLineNo">851</span>   *        null<a name="line.851"></a>
-<span class="sourceLineNo">852</span>   * @param capability what to look for, per Hadoop Common's FileSystem docs<a name="line.852"></a>
-<span class="sourceLineNo">853</span>   * @return true if there are no StreamCapabilities. false if there are, but this stream doesn't<a name="line.853"></a>
-<span class="sourceLineNo">854</span>   *         implement it. return result of asking the stream otherwise.<a name="line.854"></a>
-<span class="sourceLineNo">855</span>   */<a name="line.855"></a>
-<span class="sourceLineNo">856</span>  public static boolean hasCapability(FSDataOutputStream stream, String capability) {<a name="line.856"></a>
-<span class="sourceLineNo">857</span>    // be consistent whether or not StreamCapabilities is present<a name="line.857"></a>
-<span class="sourceLineNo">858</span>    if (stream == null) {<a name="line.858"></a>
-<span class="sourceLineNo">859</span>      throw new NullPointerException("stream parameter must not be null.");<a name="line.859"></a>
-<span class="sourceLineNo">860</span>    }<a name="line.860"></a>
-<span class="sourceLineNo">861</span>    // If o.a.h.fs.StreamCapabilities doesn't exist, assume everyone does everything<a name="line.861"></a>
-<span class="sourceLineNo">862</span>    // otherwise old versions of Hadoop will break.<a name="line.862"></a>
-<span class="sourceLineNo">863</span>    boolean result = true;<a name="line.863"></a>
-<span class="sourceLineNo">864</span>    if (StreamCapabilities.PRESENT) {<a name="line.864"></a>
-<span class="sourceLineNo">865</span>      // if StreamCapabilities is present, but the stream doesn't implement it<a name="line.865"></a>
-<span class="sourceLineNo">866</span>      // or we run into a problem invoking the method,<a name="line.866"></a>
-<span class="sourceLineNo">867</span>      // we treat that as equivalent to not declaring anything<a name="line.867"></a>
-<span class="sourceLineNo">868</span>      result = false;<a name="line.868"></a>
-<span class="sourceLineNo">869</span>      if (StreamCapabilities.CLASS.isAssignableFrom(stream.getClass())) {<a name="line.869"></a>
-<span class="sourceLineNo">870</span>        try {<a name="line.870"></a>
-<span class="sourceLineNo">871</span>          result = ((Boolean)StreamCapabilities.METHOD.invoke(stream, capability)).booleanValue();<a name="line.871"></a>
-<span class="sourceLineNo">872</span>        } catch (IllegalAccessException|IllegalArgumentException|InvocationTargetException<a name="line.872"></a>
-<span class="sourceLineNo">873</span>            exception) {<a name="line.873"></a>
-<span class="sourceLineNo">874</span>          LOG.warn("Your Hadoop installation's StreamCapabilities implementation doesn't match " +<a name="line.874"></a>
-<span class="sourceLineNo">875</span>              "our understanding of how it's supposed to work. Please file a JIRA and include " +<a name="line.875"></a>
-<span class="sourceLineNo">876</span>              "the following stack trace. In the mean time we're interpreting this behavior " +<a name="line.876"></a>
-<span class="sourceLineNo">877</span>              "difference as a lack of capability support, which will probably cause a failure.",<a name="line.877"></a>
-<span class="sourceLineNo">878</span>              exception);<a name="line.878"></a>
-<span class="sourceLineNo">879</span>        }<a name="line.879"></a>
-<span class="sourceLineNo">880</span>      }<a name="line.880"></a>
-<span class="sourceLineNo">881</span>    }<a name="line.881"></a>
-<span class="sourceLineNo">882</span>    return result;<a name="line.882"></a>
-<span class="sourceLineNo">883</span>  }<a name="line.883"></a>
-<span class="sourceLineNo">884</span><a name="line.884"></a>
-<span class="sourceLineNo">885</span>  /**<a name="line.885"></a>
-<span class="sourceLineNo">886</span>   * Helper exception for those cases where the place where we need to check a stream capability<a name="line.886"></a>
-<span class="sourceLineNo">887</span>   * is not where we have the needed context to explain the impact and mitigation for a lack.<a name="line.887"></a>
-<span class="sourceLineNo">888</span>   */<a name="line.888"></a>
-<span class="sourceLineNo">889</span>  public static class StreamLacksCapabilityException extends Exception {<a name="line.889"></a>
-<span class="sourceLineNo">890</span>    public StreamLacksCapabilityException(String message, Throwable cause) {<a name="line.890"></a>
-<span class="sourceLineNo">891</span>      super(message, cause);<a name="line.891"></a>
-<span class="sourceLineNo">892</span>    }<a name="line.892"></a>
-<span class="sourceLineNo">893</span>    public StreamLacksCapabilityException(String message) {<a name="line.893"></a>
-<span class="sourceLineNo">894</span>      super(message);<a name="line.894"></a>
-<span class="sourceLineNo">895</span>    }<a name="line.895"></a>
-<span class="sourceLineNo">896</span>  }<a name="line.896"></a>
-<span class="sourceLineNo">897</span><a name="line.897"></a>
-<span class="sourceLineNo">898</span>}<a name="line.898"></a>
+<span class="sourceLineNo">705</span>    } catch (FileNotFoundException fnfe) {<a name="line.705"></a>
+<span class="sourceLineNo">706</span>      // if directory doesn't exist, return null<a name="line.706"></a>
+<span class="sourceLineNo">707</span>      if (LOG.isTraceEnabled()) {<a name="line.707"></a>
+<span class="sourceLineNo">708</span>        LOG.trace(dir + " doesn't exist");<a name="line.708"></a>
+<span class="sourceLineNo">709</span>      }<a name="line.709"></a>
+<span class="sourceLineNo">710</span>    }<a name="line.710"></a>
+<span class="sourceLineNo">711</span>    return status;<a name="line.711"></a>
+<span class="sourceLineNo">712</span>  }<a name="line.712"></a>
+<span class="sourceLineNo">713</span><a name="line.713"></a>
+<span class="sourceLineNo">714</span>  /**<a name="line.714"></a>
+<span class="sourceLineNo">715</span>   * Calls fs.delete() and returns the value returned by the fs.delete()<a name="line.715"></a>
+<span class="sourceLineNo">716</span>   *<a name="line.716"></a>
+<span class="sourceLineNo">717</span>   * @param fs must not be null<a name="line.717"></a>
+<span class="sourceLineNo">718</span>   * @param path must not be null<a name="line.718"></a>
+<span class="sourceLineNo">719</span>   * @param recursive delete tree rooted at path<a name="line.719"></a>
+<span class="sourceLineNo">720</span>   * @return the value returned by the fs.delete()<a name="line.720"></a>
+<span class="sourceLineNo">721</span>   * @throws IOException from underlying FileSystem<a name="line.721"></a>
+<span class="sourceLineNo">722</span>   */<a name="line.722"></a>
+<span class="sourceLineNo">723</span>  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)<a name="line.723"></a>
+<span class="sourceLineNo">724</span>      throws IOException {<a name="line.724"></a>
+<span class="sourceLineNo">725</span>    return fs.delete(path, recursive);<a name="line.725"></a>
+<span class="sourceLineNo">726</span>  }<a name="line.726"></a>
+<span class="sourceLineNo">727</span><a name="line.727"></a>
+<span class="sourceLineNo">728</span>  /**<a name="line.728"></a>
+<span class="sourceLineNo">729</span>   * Calls fs.exists(). Checks if the specified path exists<a name="line.729"></a>
+<span class="sourceLineNo">730</span>   *<a name="line.730"></a>
+<span class="sourceLineNo">731</span>   * @param fs must not be null<a name="line.731"></a>
+<span class="sourceLineNo">732</span>   * @param path must not be null<a name="line.732"></a>
+<span class="sourceLineNo">733</span>   * @return the value returned by fs.exists()<a name="line.733"></a>
+<span class="sourceLineNo">734</span>   * @throws IOException from underlying FileSystem<a name="line.734"></a>
+<span class="sourceLineNo">735</span>   */<a name="line.735"></a>
+<span class="sourceLineNo">736</span>  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {<a name="line.736"></a>
+<span class="sourceLineNo">737</span>    return fs.exists(path);<a name="line.737"></a>
+<span class="sourceLineNo">738</span>  }<a name="line.738"></a>
+<span class="sourceLineNo">739</span><a name="line.739"></a>
+<span class="sourceLineNo">740</span>  /**<a name="line.740"></a>
+<span class="sourceLineNo">741</span>   * Log the current state of the filesystem from a certain root directory<a name="line.741"></a>
+<span class="sourceLineNo">742</span>   * @param fs filesystem to investigate<a name="line.742"></a>
+<span class="sourceLineNo">743</span>   * @param root root file/directory to start logging from<a name="line.743"></a>
+<span class="sourceLineNo">744</span>   * @param LOG log to output information<a name="line.744"></a>
+<span class="sourceLineNo">745</span>   * @throws IOException if an unexpected exception occurs<a name="line.745"></a>
+<span class="sourceLineNo">746</span>   */<a name="line.746"></a>
+<span class="sourceLineNo">747</span>  public static void logFileSystemState(final FileSystem fs, final Path root, Logger LOG)<a name="line.747"></a>
+<span class="sourceLineNo">748</span>      throws IOException {<a name="line.748"></a>
+<span class="sourceLineNo">749</span>    LOG.debug("File system contents for path " + root);<a name="line.749"></a>
+<span class="sourceLineNo">750</span>    logFSTree(LOG, fs, root, "|-");<a name="line.750"></a>
+<span class="sourceLineNo">751</span>  }<a name="line.751"></a>
+<span class="sourceLineNo">752</span><a name="line.752"></a>
+<span class="sourceLineNo">753</span>  /**<a name="line.753"></a>
+<span class="sourceLineNo">754</span>   * Recursive helper to log the state of the FS<a name="line.754"></a>
+<span class="sourceLineNo">755</span>   *<a name="line.755"></a>
+<span class="sourceLineNo">756</span>   * @see #logFileSystemState(FileSystem, Path, Logger)<a name="line.756"></a>
+<span class="sourceLineNo">757</span>   */<a name="line.757"></a>
+<span class="sourceLineNo">758</span>  private static void logFSTree(Logger LOG, final FileSystem fs, final Path root, String prefix)<a name="line.758"></a>
+<span class="sourceLineNo">759</span>      throws IOException {<a name="line.759"></a>
+<span class="sourceLineNo">760</span>    FileStatus[] files = listStatus(fs, root, null);<a name="line.760"></a>
+<span class="sourceLineNo">761</span>    if (files == null) {<a name="line.761"></a>
+<span class="sourceLineNo">762</span>      return;<a name="line.762"></a>
+<span class="sourceLineNo">763</span>    }<a name="line.763"></a>
+<span class="sourceLineNo">764</span><a name="line.764"></a>
+<span class="sourceLineNo">765</span>    for (FileStatus file : files) {<a name="line.765"></a>
+<span class="sourceLineNo">766</span>      if (file.isDirectory()) {<a name="line.766"></a>
+<span class="sourceLineNo">767</span>        LOG.debug(prefix + file.getPath().getName() + "/");<a name="line.767"></a>
+<span class="sourceLineNo">768</span>        logFSTree(LOG, fs, file.getPath(), prefix + "---");<a name="line.768"></a>
+<span class="sourceLineNo">769</span>      } else {<a name="line.769"></a>
+<span class="sourceLineNo">770</span>        LOG.debug(prefix + file.getPath().getName());<a name="line.770"></a>
+<span class="sourceLineNo">771</span>      }<a name="line.771"></a>
+<span class="sourceLineNo">772</span>    }<a name="line.772"></a>
+<span class="sourceLineNo">773</span>  }<a name="line.773"></a>
+<span class="sourceLineNo">774</span><a name="line.774"></a>
+<span class="sourceLineNo">775</span>  public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)<a name="line.775"></a>
+<span class="sourceLineNo">776</span>      throws IOException {<a name="line.776"></a>
+<span class="sourceLineNo">777</span>    // set the modify time for TimeToLive Cleaner<a name="line.777"></a>
+<span class="sourceLineNo">778</span>    fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);<a name="line.778"></a>
+<span class="sourceLineNo">779</span>    return fs.rename(src, dest);<a name="line.779"></a>
+<span class="sourceLineNo">780</span>  }<a name="line.780"></a>
+<span class="sourceLineNo">781</span><a name="line.781"></a>
+<span class="sourceLineNo">782</span>  /**<a name="line.782"></a>
+<span class="sourceLineNo">783</span>   * Do our short circuit read setup.<a name="line.783"></a>
+<span class="sourceLineNo">784</span>   * Checks buffer size to use and whether to do checksumming in hbase or hdfs.<a name="line.784"></a>
+<span class="sourceLineNo">785</span>   * @param conf must not be null<a name="line.785"></a>
+<span class="sourceLineNo">786</span>   */<a name="line.786"></a>
+<span class="sourceLineNo">787</span>  public static void setupShortCircuitRead(final Configuration conf) {<a name="line.787"></a>
+<span class="sourceLineNo">788</span>    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.<a name="line.788"></a>
+<span class="sourceLineNo">789</span>    boolean shortCircuitSkipChecksum =<a name="line.789"></a>
+<span class="sourceLineNo">790</span>      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);<a name="line.790"></a>
+<span class="sourceLineNo">791</span>    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);<a name="line.791"></a>
+<span class="sourceLineNo">792</span>    if (shortCircuitSkipChecksum) {<a name="line.792"></a>
+<span class="sourceLineNo">793</span>      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +<a name="line.793"></a>
+<span class="sourceLineNo">794</span>        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +<a name="line.794"></a>
+<span class="sourceLineNo">795</span>        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));<a name="line.795"></a>
+<span class="sourceLineNo">796</span>      assert !shortCircuitSkipChecksum; //this will fail if assertions are on<a name="line.796"></a>
+<span class="sourceLineNo">797</span>    }<a name="line.797"></a>
+<span class="sourceLineNo">798</span>    checkShortCircuitReadBufferSize(conf);<a name="line.798"></a>
+<span class="sourceLineNo">799</span>  }<a name="line.799"></a>
+<span class="sourceLineNo">800</span><a name="line.800"></a>
+<span class="sourceLineNo">801</span>  /**<a name="line.801"></a>
+<span class="sourceLineNo">802</span>   * Check if short circuit read buffer size is set and if not, set it to hbase value.<a name="line.802"></a>
+<span class="sourceLineNo">803</span>   * @param conf must not be null<a name="line.803"></a>
+<span class="sourceLineNo">804</span>   */<a name="line.804"></a>
+<span class="sourceLineNo">805</span>  public static void checkShortCircuitReadBufferSize(final Configuration conf) {<a name="line.805"></a>
+<span class="sourceLineNo">806</span>    final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;<a name="line.806"></a>
+<span class="sourceLineNo">807</span>    final int notSet = -1;<a name="line.807"></a>
+<span class="sourceLineNo">808</span>    // DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2<a name="line.808"></a>
+<span class="sourceLineNo">809</span>    final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";<a name="line.809"></a>
+<span class="sourceLineNo">810</span>    int size = conf.getInt(dfsKey, notSet);<a name="line.810"></a>
+<span class="sourceLineNo">811</span>    // If a size is set, return -- we will use it.<a name="line.811"></a>
+<span class="sourceLineNo">812</span>    if (size != notSet) {<a name="line.812"></a>
+<span class="sourceLineNo">813</span>      return;<a name="line.813"></a>
+<span class="sourceLineNo">814</span>    }<a name="line.814"></a>
+<span class="sourceLineNo">815</span>    // But short circuit buffer size is normally not set.  Put in place the hbase wanted size.<a name="line.815"></a>
+<span class="sourceLineNo">816</span>    int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);<a name="line.816"></a>
+<span class="sourceLineNo">817</span>    conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));<a name="line.817"></a>
+<span class="sourceLineNo">818</span>  }<a name="line.818"></a>
+<span class="sourceLineNo">819</span><a name="line.819"></a>
+<span class="sourceLineNo">820</span>  // Holder singleton idiom. JVM spec ensures this will be run at most once per Classloader, and<a name="line.820"></a>
+<span class="sourceLineNo">821</span>  // not until we attempt to reference it.<a name="line.821"></a>
+<span class="sourceLineNo">822</span>  private static class StreamCapabilities {<a name="line.822"></a>
+<span class="sourceLineNo">823</span>    public static final boolean PRESENT;<a name="line.823"></a>
+<span class="sourceLineNo">824</span>    public static final Class&lt;?&gt; CLASS;<a name="line.824"></a>
+<span class="sourceLineNo">825</span>    public static final Method METHOD;<a name="line.825"></a>
+<span class="sourceLineNo">826</span>    static {<a name="line.826"></a>
+<span class="sourceLineNo">827</span>      boolean tmp = false;<a name="line.827"></a>
+<span class="sourceLineNo">828</span>      Class&lt;?&gt; clazz = null;<a name="line.828"></a>
+<span class="sourceLineNo">829</span>      Method method = null;<a name="line.829"></a>
+<span class="sourceLineNo">830</span>      try {<a name="line.830"></a>
+<span class="sourceLineNo">831</span>        clazz = Class.forName("org.apache.hadoop.fs.StreamCapabilities");<a name="line.831"></a>
+<span class="sourceLineNo">832</span>        method = clazz.getMethod("hasCapability", String.class);<a name="line.832"></a>
+<span class="sourceLineNo">833</span>        tmp = true;<a name="line.833"></a>
+<span class="sourceLineNo">834</span>      } catch(ClassNotFoundException|NoSuchMethodException|SecurityException exception) {<a name="line.834"></a>
+<span class="sourceLineNo">835</span>        LOG.warn("Your Hadoop installation does not include the StreamCapabilities class from " +<a name="line.835"></a>
+<span class="sourceLineNo">836</span>                 "HDFS-11644, so we will skip checking if any FSDataOutputStreams actually " +<a name="line.836"></a>
+<span class="sourceLineNo">837</span>                 "support hflush/hsync. If you are running on top of HDFS this probably just " +<a name="line.837"></a>
+<span class="sourceLineNo">838</span>                 "means you have an older version and this can be ignored. If you are running on " +<a name="line.838"></a>
+<span class="sourceLineNo">839</span>                 "top of an alternate FileSystem implementation you should manually verify that " +<a name="line.839"></a>
+<span class="sourceLineNo">840</span>                 "hflush and hsync are implemented; otherwise you risk data loss and hard to " +<a name="line.840"></a>
+<span class="sourceLineNo">841</span>                 "diagnose errors when our assumptions are violated.");<a name="line.841"></a>
+<span class="sourceLineNo">842</span>        LOG.debug("The first request to check for StreamCapabilities came from this stacktrace.",<a name="line.842"></a>
+<span class="sourceLineNo">843</span>            exception);<a name="line.843"></a>
+<span class="sourceLineNo">844</span>      } finally {<a name="line.844"></a>
+<span class="sourceLineNo">845</span>        PRESENT = tmp;<a name="line.845"></a>
+<span class="sourceLineNo">846</span>        CLASS = clazz;<a name="line.846"></a>
+<span class="sourceLineNo">847</span>        METHOD = method;<a name="line.847"></a>
+<span class="sourceLineNo">848</span>      }<a name="line.848"></a>
+<span class="sourceLineNo">849</span>    }<a name="line.849"></a>
+<span class="sourceLineNo">850</span>  }<a name="line.850"></a>
+<span class="sourceLineNo">851</span><a name="line.851"></a>
+<span class="sourceLineNo">852</span>  /**<a name="line.852"></a>
+<span class="sourceLineNo">853</span>   * If our FileSystem version includes the StreamCapabilities class, check if<a name="line.853"></a>
+<span class="sourceLineNo">854</span>   * the given stream has a particular capability.<a name="line.854"></a>
+<span class="sourceLineNo">855</span>   * @param stream capabilities are per-stream instance, so check this one specifically. must not be<a name="line.855"></a>
+<span class="sourceLineNo">856</span>   *        null<a name="line.856"></a>
+<span class="sourceLineNo">857</span>   * @param capability what to look for, per Hadoop Common's FileSystem docs<a name="line.857"></a>
+<span class="sourceLineNo">858</span>   * @return true if there are no StreamCapabilities. false if there are, but this stream doesn't<a name="line.858"></a>
+<span class="sourceLineNo">859</span>   *         implement it. return result of asking the stream otherwise.<a name="line.859"></a>
+<span class="sourceLineNo">860</span>   */<a name="line.860"></a>
+<span class="sourceLineNo">861</span>  public static boolean hasCapability(FSDataOutputStream stream, String capability) {<a name="line.861"></a>
+<span class="sourceLineNo">862</span>    // be consistent whether or not StreamCapabilities is present<a name="line.862"></a>
+<span class="sourceLineNo">863</span>    if (stream == null) {<a name="line.863"></a>
+<span class="sourceLineNo">864</span>      throw new NullPointerException("stream parameter must not be null.");<a name="line.864"></a>
+<span class="sourceLineNo">865</span>    }<a name="line.865"></a>
+<span class="sourceLineNo">866</span>    // If o.a.h.fs.StreamCapabilities doesn't exist, assume everyone does everything<a name="line.866"></a>
+<span class="sourceLineNo">867</span>    // otherwise old versions of Hadoop will break.<a name="line.867"></a>
+<span class="sourceLineNo">868</span>    boolean result = true;<a name="line.868"></a>
+<span class="sourceLineNo">869</span>    if (StreamCapabilities.PRESENT) {<a name="line.869"></a>
+<span class="sourceLineNo">870</span>      // if StreamCapabilities is present, but the stream doesn't implement it<a name="line.870"></a>
+<span class="sourceLineNo">871</span>      // or we run into a problem invoking the method,<a name="line.871"></a>
+<span class="sourceLineNo">872</span>      // we treat that as equivalent to not declaring anything<a name="line.872"></a>
+<span class="sourceLineNo">873</span>      result = false;<a name="line.873"></a>
+<span class="sourceLineNo">874</span>      if (StreamCapabilities.CLASS.isAssignableFrom(stream.getClass())) {<a name="line.874"></a>
+<span class="sourceLineNo">875</span>        try {<a name="line.875"></a>
+<span class="sourceLineNo">876</span>          result = ((Boolean)StreamCapabilities.METHOD.invoke(stream, capability)).booleanValue();<a name="line.876"></a>
+<span class="sourceLineNo">877</span>        } catch (IllegalAccessException|IllegalArgumentException|InvocationTargetException<a name="line.877"></a>
+<span class="sourceLineNo">878</span>            exception) {<a name="line.878"></a>
+<span class="sourceLineNo">879</span>          LOG.warn("Your Hadoop installation's StreamCapabilities implementation doesn't match " +<a name="line.879"></a>
+<span class="sourceLineNo">880</span>              "our understanding of how it's supposed to work. Please file a JIRA and include " +<a name="line.880"></a>
+<span class="sourceLineNo">881</span>              "the following stack trace. In the mean time we're interpreting this behavior " +<a name="line.881"></a>
+<span class="sourceLineNo">882</span>              "difference as a lack of capability support, which will probably cause a failure.",<a

<TRUNCATED>