Posted to commits@hbase.apache.org by st...@apache.org on 2013/05/24 18:20:01 UTC

svn commit: r1486107 - in /hbase/branches/0.95: hbase-common/src/main/resources/hbase-default.xml hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java

Author: stack
Date: Fri May 24 16:20:01 2013
New Revision: 1486107

URL: http://svn.apache.org/r1486107
Log:
HBASE-8449 Refactor recoverLease retries and pauses informed by findings over in hbase-8389

Added:
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java
Modified:
    hbase/branches/0.95/hbase-common/src/main/resources/hbase-default.xml
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java

Modified: hbase/branches/0.95/hbase-common/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-common/src/main/resources/hbase-default.xml?rev=1486107&r1=1486106&r2=1486107&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-common/src/main/resources/hbase-default.xml (original)
+++ hbase/branches/0.95/hbase-common/src/main/resources/hbase-default.xml Fri May 24 16:20:01 2013
@@ -1028,4 +1028,21 @@ possible configurations would overwhelm 
         hbase.server.compactchecker.interval.multiplier multiplied by hbase.server.thread.wakefrequency.
     </description>
   </property>
+  <property>
+    <name>hbase.lease.recovery.timeout</name>
+    <value>900000</value>
+    <description>
+      How long we wait on dfs lease recovery in total before giving up.
+    </description>
+  </property>
+  <property>
+    <name>hbase.lease.recovery.dfs.timeout</name>
+    <value>61000</value>
+    <description>
+        How long to wait between dfs recoverLease invocations. Should be just larger than how
+        long it takes the namenode to time out trying to reach a datanode; usually
+        dfs.socket.timeout.  If HBase could ask hdfs for its cluster configs, we would not need
+        this config.  See the end of HBASE-8389 for more.
+    </description>
+  </property>
 </configuration>
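
To make the relationship between the two new properties concrete, below is a small
illustrative sketch (not part of this commit). The configuration keys and defaults are
taken from the descriptions above; the class and method names are invented for the example.

import org.apache.hadoop.conf.Configuration;

// Illustrative only: reads the two lease-recovery settings added to hbase-default.xml
// above, using the same defaults, and shows how they relate to each other.
public class LeaseRecoverySettingsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Total budget for dfs lease recovery before giving up (default 15 minutes).
    int totalTimeoutMs = conf.getInt("hbase.lease.recovery.timeout", 900000);
    // Pause between recoverLease invocations; just larger than the namenode's
    // datanode socket timeout (dfs.socket.timeout), hence 61 seconds by default.
    int perRetryPauseMs = conf.getInt("hbase.lease.recovery.dfs.timeout", 61000);
    // With the defaults, roughly 14 long retries fit inside the total budget.
    System.out.println("~" + (totalTimeoutMs / perRetryPauseMs) + " long retries in "
        + totalTimeoutMs + "ms");
  }
}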

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java?rev=1486107&r1=1486106&r2=1486107&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java Fri May 24 16:20:01 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.nam
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.lang.reflect.Method;
 
 
 /**
@@ -46,61 +47,162 @@ public class FSHDFSUtils extends FSUtils
    */
   @Override
   public void recoverFileLease(final FileSystem fs, final Path p,
-      Configuration conf, CancelableProgressable reporter) throws IOException {
+      Configuration conf, CancelableProgressable reporter)
+  throws IOException {
     // lease recovery not needed for local file system case.
-    if (!(fs instanceof DistributedFileSystem)) {
-      return;
-    }
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    if (!(fs instanceof DistributedFileSystem)) return;
+    recoverDFSFileLease((DistributedFileSystem)fs, p, conf, reporter);
+  }
 
-    LOG.info("Recovering file " + p);
+  /*
+   * Run the dfs recover lease. recoverLease is asynchronous. It returns:
+   *    - false when it starts the lease recovery (i.e. lease recovery not *yet* done)
+   *    - true when the lease recovery has succeeded or the file is closed.
+   * But, we have to be careful.  Each time we call recoverLease, it starts the recover lease
+   * process over from the beginning.  We could put ourselves in a situation where we are
+   * doing nothing but starting a recovery, interrupting it to start again, and so on.
+   * The findings over in HBASE-8354 have it that the namenode will try to recover the lease
+   * on the file's primary node.  If all is well, it should return near immediately.  But,
+   * as is common, it is the very primary node that has crashed and so the namenode will be
+   * stuck waiting on a socket timeout before it will ask another datanode to start the
+   * recovery. It does not help if we call recoverLease in the meantime and in particular,
+   * subsequent to the socket timeout, a recoverLease invocation will cause us to start
+   * over from square one (possibly waiting on socket timeout against primary node).  So,
+   * in the below, we do the following:
+   * 1. Call recoverLease.
+   * 2. If it returns true, break.
+   * 3. If it returns false, wait a few seconds and then call it again.
+   * 4. If it returns true, break.
+   * 5. If it returns false, wait for what we think the datanode socket timeout is
+   * (configurable) and then try again.
+   * 6. If it returns true, break.
+   * 7. If it returns false, repeat starting at step 5. above.
+   * 
+   * If HDFS-4525 is available, call it every second and we might be able to exit early.
+   */
+  boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
+      final Configuration conf, final CancelableProgressable reporter)
+  throws IOException {
+    LOG.info("Recovering lease on dfs file " + p);
     long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
     // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
-    //  usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
-    //  beyond that limit 'to be safe'.
+    // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
+    // beyond that limit 'to be safe'.
     long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
+    // This setting should be what the cluster dfs heartbeat is set to.
+    long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 3000);
+    // This should be set to how long it'll take for us to time out against the primary datanode
+    // if it is dead.  We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in
+    // HDFS, the default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY.
+    long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 61 * 1000);
     boolean recovered = false;
-    int nbAttempt = 0;
-    while (!recovered) {
-      nbAttempt++;
+    // We break the loop if we succeed the lease recovery, timeout, or we throw an exception.
+    for (int nbAttempt = 0; true; nbAttempt++) {
+      recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
+      if (recovered) break;
+      checkIfCancelled(reporter);
+      if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting)) break;
       try {
-        // recoverLease is asynchronous. We expect it to return true at the first call if the
-        //  file is closed. So, it returns:
-        //    - false when it starts the lease recovery (i.e. lease recovery not *yet* done
-        //    - true when the lease recovery has succeeded or the file is closed.
-        recovered = dfs.recoverLease(p);
-        LOG.info("Attempt " + nbAttempt + " to recoverLease on file " + p +
-            " returned " + recovered + ", trying for " +
-            (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms");
-      } catch (IOException e) {
-        if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
-          // This exception comes out instead of FNFE, fix it
-          throw new FileNotFoundException("The given HLog wasn't found at " + p);
-        } else if (e instanceof FileNotFoundException) {
-          throw e;
-        }
-        LOG.warn("Got IOException on attempt " + nbAttempt + " to recover lease for file " + p +
-            ", retrying.", e);
-      }
-      if (!recovered) {
-        if (reporter != null && !reporter.progress()) {
-          throw new InterruptedIOException("Operation is cancelled");
-        }
-        // try at least twice.
-        if (nbAttempt > 2 && recoveryTimeout < EnvironmentEdgeManager.currentTimeMillis()) {
-          LOG.error("Can't recoverLease after " + nbAttempt + " attempts and " +
-              (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms " + " for " + p +
-              " - continuing without the lease, but we could have a data loss.");
+        // On the first time through wait the short 'firstPause'.
+        if (nbAttempt == 0) {
+          Thread.sleep(firstPause);
         } else {
-          try {
-            Thread.sleep(nbAttempt < 3 ? 500 : 1000);
-          } catch (InterruptedException ie) {
-            InterruptedIOException iioe = new InterruptedIOException();
-            iioe.initCause(ie);
-            throw iioe;
+          // Cycle here until subsequentPause elapses.  While spinning, check isFileClosed if
+          // available (should be in hadoop 2.0.5... not in hadoop 1 though).
+          long localStartWaiting = EnvironmentEdgeManager.currentTimeMillis();
+          while ((EnvironmentEdgeManager.currentTimeMillis() - localStartWaiting) <
+              subsequentPause) {
+            Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
+            if (isFileClosed(dfs, p)) break;
+            checkIfCancelled(reporter);
           }
         }
+      } catch (InterruptedException ie) {
+        InterruptedIOException iioe = new InterruptedIOException();
+        iioe.initCause(ie);
+        throw iioe;
+      }
+    }
+    return recovered;
+  }
+
+  boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout,
+      final int nbAttempt, final Path p, final long startWaiting) {
+    if (recoveryTimeout < EnvironmentEdgeManager.currentTimeMillis()) {
+      LOG.warn("Cannot recoverLease after trying for " +
+        conf.getInt("hbase.lease.recovery.timeout", 900000) +
+        "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " +
+        getLogMessageDetail(nbAttempt, p, startWaiting));
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Try to recover the lease.
+   * @param dfs
+   * @param nbAttempt
+   * @param p
+   * @param startWaiting
+   * @return True if dfs#recoverLease returned true.
+   * @throws FileNotFoundException
+   */
+  boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p,
+      final long startWaiting)
+  throws FileNotFoundException {
+    boolean recovered = false;
+    try {
+      recovered = dfs.recoverLease(p);
+      LOG.info("recoverLease=" + recovered + ", " +
+        getLogMessageDetail(nbAttempt, p, startWaiting));
+    } catch (IOException e) {
+      if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
+        // This exception comes out instead of FNFE, fix it
+        throw new FileNotFoundException("The given HLog wasn't found at " + p);
+      } else if (e instanceof FileNotFoundException) {
+        throw (FileNotFoundException)e;
+      }
+      LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e);
+    }
+    return recovered;
+  }
+
+  /**
+   * @param nbAttempt
+   * @param p
+   * @param startWaiting
+   * @return Detail to append to any log message around lease recovering.
+   */
+  private String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) {
+    return "attempt=" + nbAttempt + " on file=" + p + " after " +
+      (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms";
+  }
+
+  /**
+   * Call HDFS-4525 isFileClosed if it is available.
+   * @param dfs
+   * @param p
+   * @return True if file is closed.
+   */
+  boolean isFileClosed(final DistributedFileSystem dfs, final Path p) {
+    try {
+      Method m = dfs.getClass().getMethod("isFileClosed", new Class<?>[] {String.class});
+      return (Boolean) m.invoke(dfs, p.toString());
+    } catch (SecurityException e) {
+      LOG.warn("No access", e);
+    } catch (NoSuchMethodException e) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("noSuchMethod isFileClosed (HDFS-4525); making do without");
       }
+    } catch (Exception e) {
+      LOG.warn("Failed invocation", e);
     }
+    return false;
+  }
+
+  void checkIfCancelled(final CancelableProgressable reporter)
+  throws InterruptedIOException {
+    if (reporter == null) return;
+    if (!reporter.progress()) throw new InterruptedIOException("Operation cancelled");
   }
-}
+}
\ No newline at end of file
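
For orientation, here is a purely illustrative caller of the refactored entry point (not part
of this commit). The recoverFileLease signature and the configuration keys come from the patch
above; the class name, the path handling, and the null reporter are assumptions for the sketch
(a null reporter is tolerated because checkIfCancelled() returns immediately in that case).

package org.apache.hadoop.hbase.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative caller of FSHDFSUtils#recoverFileLease; it exercises the retry/pause
// schedule documented on recoverDFSFileLease above. Not part of the commit.
public class RecoverLeaseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Optionally tighten the first pause (defaults: 3000ms first pause, 61000ms thereafter).
    conf.setInt("hbase.lease.recovery.first.pause", 1000);
    Path walPath = new Path(args[0]);            // e.g. a WAL file whose lease we want back
    FileSystem fs = walPath.getFileSystem(conf);
    // No-op on a local filesystem; on DFS this loops until recovery succeeds or
    // hbase.lease.recovery.timeout elapses.
    new FSHDFSUtils().recoverFileLease(fs, walPath, conf, null);
  }
}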

Added: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java?rev=1486107&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java (added)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java Fri May 24 16:20:01 2013
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test our recoverLease loop against mocked up filesystem.
+ */
+@Category(MediumTests.class)
+public class TestFSHDFSUtils {
+  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
+  static {
+    Configuration conf = HTU.getConfiguration();
+    conf.setInt("hbase.lease.recovery.dfs.timeout", 1000);
+    conf.setInt("hbase.lease.recovery.first.pause", 10);
+    conf.setInt("hbase.lease.recovery.pause", 10);
+  };
+  private FSHDFSUtils fsHDFSUtils = new FSHDFSUtils();
+  private static Path FILE = new Path(HTU.getDataTestDir(), "file.txt");
+  long startTime = -1;
+
+  @Before
+  public void setup() {
+    this.startTime = EnvironmentEdgeManager.currentTimeMillis();
+  }
+
+  /**
+   * Test recover lease eventually succeeding.
+   * @throws IOException 
+   */
+  @Test (timeout = 30000)
+  public void testRecoverLease() throws IOException {
+    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
+    Mockito.when(reporter.progress()).thenReturn(true);
+    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
+    // Fail four times and pass on the fifth.
+    Mockito.when(dfs.recoverLease(FILE)).
+      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
+    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
+    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
+    // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
+    // invocations will happen pretty fast... then we fall into the longer wait loop).
+    assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) >
+      (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
+  }
+
+  /**
+   * Test that isFileClosed makes us recover lease faster.
+   * @throws IOException
+   */
+  @Test (timeout = 30000)
+  public void testIsFileClosed() throws IOException {
+    // Make this time long so it is plain we broke out because of the isFileClosed invocation.
+    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
+    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
+    Mockito.when(reporter.progress()).thenReturn(true);
+    IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
+    // Now make it so we fail the first two times -- the two fast invocations, then we fall into
+    // the long loop during which we will call isFileClosed... the next invocation should
+    // therefore return true if we are to break the loop.
+    Mockito.when(dfs.recoverLease(FILE)).
+      thenReturn(false).thenReturn(false).thenReturn(true);
+    Mockito.when(dfs.isFileClosed(FILE.toString())).thenReturn(true);
+    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
+    Mockito.verify(dfs, Mockito.times(3)).recoverLease(FILE);
+    Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE.toString());
+  }
+
+  /**
+   * Version of DFS that has HDFS-4525 in it.
+   */
+  class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
+    /**
+     * Close status of a file. Copied over from HDFS-4525
+     * @return true if file is already closed
+     **/
+    public boolean isFileClosed(String f) throws IOException{
+      return false;
+    }
+  }
+}
\ No newline at end of file
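
As a companion to the two tests above, a hypothetical third test (not part of this commit) could
cover the give-up path handled by checkIfTimedout: with a mock that never recovers the lease and a
tiny hbase.lease.recovery.timeout, recoverDFSFileLease should return false. The sketch below lives
in the same package so it can reach the package-private recoverDFSFileLease; everything not shown
in the patch above is an assumption.

package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertFalse;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Test;
import org.mockito.Mockito;

// Hypothetical companion test: the recovery loop gives up and returns false once
// hbase.lease.recovery.timeout elapses without a successful recoverLease call.
public class TestFSHDFSUtilsTimeout {
  private static final Path FILE = new Path("/tmp/file.txt");

  @Test (timeout = 30000)
  public void testRecoverLeaseTimesOut() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt("hbase.lease.recovery.timeout", 1);        // give up almost immediately
    conf.setInt("hbase.lease.recovery.first.pause", 10);   // keep the sleeps short
    conf.setInt("hbase.lease.recovery.pause", 10);
    conf.setInt("hbase.lease.recovery.dfs.timeout", 1000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
    Mockito.when(dfs.recoverLease(FILE)).thenReturn(false); // never recovers
    assertFalse(new FSHDFSUtils().recoverDFSFileLease(dfs, FILE, conf, reporter));
    Mockito.verify(dfs, Mockito.atLeast(1)).recoverLease(FILE);
  }
}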