You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/09/05 06:16:03 UTC
svn commit: r1380978 - in
/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/qjournal/client/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/webapps/hdfs/ src/test/java/org/apache/...
Author: todd
Date: Wed Sep 5 04:16:02 2012
New Revision: 1380978
URL: http://svn.apache.org/viewvc?rev=1380978&view=rev
Log:
HDFS-3869. Expose non-file journal manager details in web UI. Contributed by Todd Lipcon.
Modified:
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3077.txt
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3077.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3077.txt?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3077.txt (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3077.txt Wed Sep 5 04:16:02 2012
@@ -38,3 +38,5 @@ HDFS-3845. Fixes for edge cases in QJM r
HDFS-3877. QJM: Provide defaults for dfs.journalnode.*address (eli)
HDFS-3863. Track last "committed" txid in QJM (todd)
+
+HDFS-3869. Expose non-file journal manager details in web UI (todd)
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java Wed Sep 5 04:16:02 2012
@@ -140,4 +140,10 @@ interface AsyncLogger {
* after this point, and any in-flight RPCs may throw an exception.
*/
public void close();
+
+ /**
+ * Append an HTML-formatted report for this logger's status to the provided
+ * StringBuilder. This is displayed on the NN web UI.
+ */
+ public void appendHtmlReport(StringBuilder sb);
}
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java Wed Sep 5 04:16:02 2012
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.qjournal.p
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.jasper.compiler.JspUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -190,6 +191,24 @@ class AsyncLoggerSet {
int size() {
return loggers.size();
}
+
+ /**
+ * Append an HTML-formatted status readout on the current
+ * state of the underlying loggers.
+ * @param sb the StringBuilder to append to
+ */
+ void appendHtmlReport(StringBuilder sb) {
+ sb.append("<table class=\"storage\">");
+ sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
+ for (AsyncLogger l : loggers) {
+ sb.append("<tr>");
+ sb.append("<td>" + JspUtil.escapeXml(l.toString()) + "</td>");
+ sb.append("<td>");
+ l.appendHtmlReport(sb);
+ sb.append("</td></tr>\n");
+ }
+ sb.append("</table>");
+ }
/**
* @return the (mutable) list of loggers, for use in tests to
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java Wed Sep 5 04:16:02 2012
@@ -84,7 +84,12 @@ public class IPCLoggerChannel implements
* The number of bytes of edits data still in the queue.
*/
private int queuedEditsSizeBytes = 0;
-
+
+ /**
+ * The highest txid that has been successfully logged on the remote JN.
+ */
+ private long highestAckedTxId = 0;
+
/**
* The maximum number of bytes that can be pending in the queue.
* This keeps the writer from hitting OOME if one of the loggers
@@ -262,6 +267,9 @@ public class IPCLoggerChannel implements
public Void call() throws IOException {
getProxy().journal(createReqInfo(),
segmentTxId, firstTxnId, numTxns, data);
+ synchronized (IPCLoggerChannel.this) {
+ highestAckedTxId = firstTxnId + numTxns - 1;
+ }
return null;
}
});
@@ -398,4 +406,14 @@ public class IPCLoggerChannel implements
public String toString() {
return "Channel to journal node " + addr;
}
-}
+
+ @Override
+ public synchronized void appendHtmlReport(StringBuilder sb) {
+ sb.append("Written txid ").append(highestAckedTxId);
+ long behind = committedTxId - highestAckedTxId;
+ assert behind >= 0;
+ if (behind > 0) {
+ sb.append(" (" + behind + " behind)");
+ }
+ }
+}
\ No newline at end of file
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java Wed Sep 5 04:16:02 2012
@@ -109,4 +109,13 @@ class QuorumOutputStream extends EditLog
loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
}
}
+
+ @Override
+ public String generateHtmlReport() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Writing segment beginning at txid " + segmentTxId + "<br/>\n");
+ loggers.appendHtmlReport(sb);
+ return sb.toString();
+ }
+
}
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java Wed Sep 5 04:16:02 2012
@@ -24,6 +24,7 @@ import static org.apache.hadoop.util.Tim
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.jasper.compiler.JspUtil;
/**
* A generic abstract class to support journaling of edits logs into
@@ -132,4 +133,12 @@ public abstract class EditLogOutputStrea
protected long getNumSync() {
return numSync;
}
+
+ /**
+ * @return a short HTML snippet suitable for describing the current
+ * status of the stream
+ */
+ public String generateHtmlReport() {
+ return JspUtil.escapeXml(this.toString());
+ }
}
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java Wed Sep 5 04:16:02 2012
@@ -26,6 +26,7 @@ import java.util.LinkedList;
import java.util.List;
import java.util.PriorityQueue;
import java.util.SortedSet;
+import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -147,7 +148,7 @@ public class JournalSet implements Journ
return journal;
}
- private boolean isDisabled() {
+ boolean isDisabled() {
return disabled;
}
@@ -165,8 +166,12 @@ public class JournalSet implements Journ
return required;
}
}
-
- private List<JournalAndStream> journals = Lists.newArrayList();
+
+ // COW implementation is necessary since some users (eg the web ui) call
+ // getAllJournalStreams() and then iterate. Since this is rarely
+ // mutated, there is no performance concern.
+ private List<JournalAndStream> journals =
+ new CopyOnWriteArrayList<JournalSet.JournalAndStream>();
final int minimumRedundantJournals;
JournalSet(int minimumRedundantResources) {
@@ -519,7 +524,6 @@ public class JournalSet implements Journ
}
}
- @VisibleForTesting
List<JournalAndStream> getAllJournalStreams() {
return journals;
}
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Wed Sep 5 04:16:02 2012
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
@@ -61,6 +62,8 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.XMLOutputter;
+import com.google.common.base.Preconditions;
+
class NamenodeJspHelper {
static String getSafeModeText(FSNamesystem fsn) {
if (!fsn.isInSafeMode())
@@ -213,6 +216,52 @@ class NamenodeJspHelper {
out.print("</table></div>\n");
}
+
+ /**
+ * Generate an HTML report containing the current status of the HDFS
+ * journals.
+ */
+ void generateJournalReport(JspWriter out, NameNode nn,
+ HttpServletRequest request) throws IOException {
+ FSEditLog log = nn.getFSImage().getEditLog();
+ Preconditions.checkArgument(log != null, "no edit log set in %s", nn);
+
+ out.println("<h3> " + nn.getRole() + " Journal Status: </h3>");
+
+ out.println("<b>Current transaction ID:</b> " +
+ nn.getFSImage().getLastAppliedOrWrittenTxId() + "<br/>");
+
+
+ boolean openForWrite = log.isOpenForWrite();
+
+ out.println("<div class=\"dfstable\">");
+ out.println("<table class=\"storage\" title=\"NameNode Journals\">\n"
+ + "<thead><tr><td><b>Journal Manager</b></td><td><b>State</b></td></tr></thead>");
+ for (JournalAndStream jas : log.getJournals()) {
+ out.print("<tr>");
+ out.print("<td>" + jas.getManager());
+ if (jas.isRequired()) {
+ out.print(" [required]");
+ }
+ out.print("</td><td>");
+
+ if (jas.isDisabled()) {
+ out.print("<span class=\"failed\">Failed</span>");
+ } else if (openForWrite) {
+ EditLogOutputStream elos = jas.getCurrentStream();
+ if (elos != null) {
+ out.println(elos.generateHtmlReport());
+ } else {
+ out.println("not currently writing");
+ }
+ } else {
+ out.println("open for read");
+ }
+ out.println("</td></tr>");
+ }
+
+ out.println("</table></div>");
+ }
void generateHealthReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp Wed Sep 5 04:16:02 2012
@@ -60,8 +60,10 @@
<%= NamenodeJspHelper.getCorruptFilesWarning(fsn)%>
<% healthjsp.generateHealthReport(out, nn, request); %>
-<hr>
+<% healthjsp.generateJournalReport(out, nn, request); %>
+<hr/>
<% healthjsp.generateConfReport(out, nn, request); %>
+<hr>
<%
out.println(ServletUtil.htmlFooter());
%>
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java?rev=1380978&r1=1380977&r2=1380978&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java Wed Sep 5 04:16:02 2012
@@ -17,17 +17,18 @@
*/
package org.apache.hadoop.hdfs.qjournal;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
+import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -185,4 +186,41 @@ public class TestNNWithQJM {
"Unable to start log segment 1: too few journals", ioe);
}
}
+
+ @Test
+ public void testWebPageHasQjmInfo() throws Exception {
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+ mjc.getQuorumJournalURI("myjournal").toString());
+
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .manageNameDfsDirs(false)
+ .build();
+ try {
+ URL url = new URL("http://localhost:"
+ + NameNode.getHttpAddress(cluster.getConfiguration(0)).getPort()
+ + "/dfshealth.jsp");
+
+ cluster.getFileSystem().mkdirs(TEST_PATH);
+
+ String contents = DFSTestUtil.urlGet(url);
+ assertTrue(contents.contains("Channel to journal node"));
+ assertTrue(contents.contains("Written txid 2"));
+
+ // Stop one JN, do another txn, and make sure it shows as
+ // stuck behind the others.
+ mjc.getJournalNode(0).stopAndJoin(0);
+
+ cluster.getFileSystem().delete(TEST_PATH, true);
+
+ contents = DFSTestUtil.urlGet(url);
+ System.out.println(contents);
+ assertTrue(contents.contains("(1 behind)"));
+ } finally {
+ cluster.shutdown();
+ }
+
+ }
}