You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by wh...@apache.org on 2014/05/01 20:46:19 UTC
svn commit: r1591732 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/common/
src/main/java/org/apache/hadoop/hdfs/server/datanode/
src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/...
Author: wheat9
Date: Thu May 1 18:46:18 2014
New Revision: 1591732
URL: http://svn.apache.org/r1591732
Log:
HDFS-6252. Phase out the old web UI in HDFS. Contributed by Haohui Mai.
Removed:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/browseBlock.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/browseDirectory.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/tail.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/block_info_xml.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_files.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/corrupt_replicas_xml.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/decommission.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/decommission.xsl
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsclusterhealth.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsclusterhealth.xsl
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsclusterhealth_utils.xsl
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfsnodelist.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/nn_browsedfscontent.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/journalstatus.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.jsp
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAWebUI.java
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu May 1 18:46:18 2014
@@ -127,6 +127,8 @@ Trunk (Unreleased)
HDFS-6246. Remove 'dfs.support.append' flag from trunk code. (umamahesh)
+ HDFS-6252. Phase out the old web UI in HDFS. (wheat9)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Thu May 1 18:46:18 2014
@@ -87,475 +87,10 @@ public class JspHelper {
public static final String CURRENT_CONF = "current.conf";
public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
public static final String NAMENODE_ADDRESS = "nnaddr";
- static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
- "=";
private static final Log LOG = LogFactory.getLog(JspHelper.class);
/** Private constructor for preventing creating JspHelper object. */
- private JspHelper() {}
-
- // data structure to count number of blocks on datanodes.
- private static class NodeRecord extends DatanodeInfo {
- int frequency;
-
- public NodeRecord(DatanodeInfo info, int count) {
- super(info);
- this.frequency = count;
- }
-
- @Override
- public boolean equals(Object obj) {
- // Sufficient to use super equality as datanodes are uniquely identified
- // by DatanodeID
- return (this == obj) || super.equals(obj);
- }
- @Override
- public int hashCode() {
- // Super implementation is sufficient
- return super.hashCode();
- }
- }
-
- // compare two records based on their frequency
- private static class NodeRecordComparator implements Comparator<NodeRecord> {
-
- @Override
- public int compare(NodeRecord o1, NodeRecord o2) {
- if (o1.frequency < o2.frequency) {
- return -1;
- } else if (o1.frequency > o2.frequency) {
- return 1;
- }
- return 0;
- }
- }
-
- /**
- * convenience method for canonicalizing host name.
- * @param addr name:port or name
- * @return canonicalized host name
- */
- public static String canonicalize(String addr) {
- // default port 1 is supplied to allow addr without port.
- // the port will be ignored.
- return NetUtils.createSocketAddr(addr, 1).getAddress()
- .getCanonicalHostName();
- }
-
- /**
- * A helper class that generates the correct URL for different schema.
- *
- */
- public static final class Url {
- public static String authority(String scheme, DatanodeID d) {
- String fqdn = (d.getIpAddr() != null && !d.getIpAddr().isEmpty())?
- canonicalize(d.getIpAddr()):
- d.getHostName();
- if (scheme.equals("http")) {
- return fqdn + ":" + d.getInfoPort();
- } else if (scheme.equals("https")) {
- return fqdn + ":" + d.getInfoSecurePort();
- } else {
- throw new IllegalArgumentException("Unknown scheme:" + scheme);
- }
- }
-
- public static String url(String scheme, DatanodeID d) {
- return scheme + "://" + authority(scheme, d);
- }
- }
-
- public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf)
- throws IOException {
- HashMap<DatanodeInfo, NodeRecord> map =
- new HashMap<DatanodeInfo, NodeRecord>();
- for (LocatedBlock block : blks.getLocatedBlocks()) {
- DatanodeInfo[] nodes = block.getLocations();
- for (DatanodeInfo node : nodes) {
- NodeRecord record = map.get(node);
- if (record == null) {
- map.put(node, new NodeRecord(node, 1));
- } else {
- record.frequency++;
- }
- }
- }
- NodeRecord[] nodes = map.values().toArray(new NodeRecord[map.size()]);
- Arrays.sort(nodes, new NodeRecordComparator());
- return bestNode(nodes, false);
- }
-
- public static DatanodeInfo bestNode(LocatedBlock blk, Configuration conf)
- throws IOException {
- DatanodeInfo[] nodes = blk.getLocations();
- return bestNode(nodes, true);
- }
-
- private static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom)
- throws IOException {
- if (nodes == null || nodes.length == 0) {
- throw new IOException("No nodes contain this block");
- }
- int l = 0;
- while (l < nodes.length && !nodes[l].isDecommissioned()) {
- ++l;
- }
-
- if (l == 0) {
- throw new IOException("No active nodes contain this block");
- }
-
- int index = doRandom ? DFSUtil.getRandom().nextInt(l) : 0;
- return nodes[index];
- }
-
- public static void streamBlockInAscii(InetSocketAddress addr, String poolId,
- long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
- long blockSize, long offsetIntoBlock, long chunkSizeToView,
- JspWriter out, final Configuration conf, DFSClient.Conf dfsConf,
- final DataEncryptionKey encryptionKey)
- throws IOException {
- if (chunkSizeToView == 0) return;
- int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
-
- BlockReader blockReader = new BlockReaderFactory(dfsConf).
- setInetSocketAddress(addr).
- setBlock(new ExtendedBlock(poolId, blockId, 0, genStamp)).
- setFileName(BlockReaderFactory.getFileName(addr, poolId, blockId)).
- setBlockToken(blockToken).
- setStartOffset(offsetIntoBlock).
- setLength(amtToRead).
- setVerifyChecksum(true).
- setClientName("JspHelper").
- setClientCacheContext(ClientContext.getFromConf(conf)).
- setDatanodeInfo(new DatanodeInfo(
- new DatanodeID(addr.getAddress().getHostAddress(),
- addr.getHostName(), poolId, addr.getPort(), 0, 0, 0))).
- setCachingStrategy(CachingStrategy.newDefaultStrategy()).
- setConfiguration(conf).
- setRemotePeerFactory(new RemotePeerFactory() {
- @Override
- public Peer newConnectedPeer(InetSocketAddress addr)
- throws IOException {
- Peer peer = null;
- Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
- try {
- sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
- sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
- peer = TcpPeerServer.peerFromSocketAndKey(sock, encryptionKey);
- } finally {
- if (peer == null) {
- IOUtils.closeSocket(sock);
- }
- }
- return peer;
- }
- }).
- build();
-
- final byte[] buf = new byte[amtToRead];
- try {
- int readOffset = 0;
- int retries = 2;
- while (amtToRead > 0) {
- int numRead = amtToRead;
- try {
- blockReader.readFully(buf, readOffset, amtToRead);
- } catch (IOException e) {
- retries--;
- if (retries == 0)
- throw new IOException("Could not read data from datanode");
- continue;
- }
- amtToRead -= numRead;
- readOffset += numRead;
- }
- } finally {
- blockReader.close();
- }
- out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
- }
-
- public static void addTableHeader(JspWriter out) throws IOException {
- out.print("<table border=\"1\""+
- " cellpadding=\"2\" cellspacing=\"2\">");
- out.print("<tbody>");
- }
- public static void addTableRow(JspWriter out, String[] columns) throws IOException {
- out.print("<tr>");
- for (int i = 0; i < columns.length; i++) {
- out.print("<td style=\"vertical-align: top;\"><B>"+columns[i]+"</B><br></td>");
- }
- out.print("</tr>");
- }
- public static void addTableRow(JspWriter out, String[] columns, int row) throws IOException {
- out.print("<tr>");
-
- for (int i = 0; i < columns.length; i++) {
- if (row/2*2 == row) {//even
- out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
- } else {
- out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
-
- }
- }
- out.print("</tr>");
- }
- public static void addTableFooter(JspWriter out) throws IOException {
- out.print("</tbody></table>");
- }
-
- public static void sortNodeList(final List<DatanodeDescriptor> nodes,
- String field, String order) {
-
- class NodeComapare implements Comparator<DatanodeDescriptor> {
- static final int
- FIELD_NAME = 1,
- FIELD_LAST_CONTACT = 2,
- FIELD_BLOCKS = 3,
- FIELD_CAPACITY = 4,
- FIELD_USED = 5,
- FIELD_PERCENT_USED = 6,
- FIELD_NONDFS_USED = 7,
- FIELD_REMAINING = 8,
- FIELD_PERCENT_REMAINING = 9,
- FIELD_ADMIN_STATE = 10,
- FIELD_DECOMMISSIONED = 11,
- FIELD_BLOCKPOOL_USED = 12,
- FIELD_PERBLOCKPOOL_USED = 13,
- FIELD_FAILED_VOLUMES = 14,
- SORT_ORDER_ASC = 1,
- SORT_ORDER_DSC = 2;
-
- int sortField = FIELD_NAME;
- int sortOrder = SORT_ORDER_ASC;
-
- public NodeComapare(String field, String order) {
- if (field.equals("lastcontact")) {
- sortField = FIELD_LAST_CONTACT;
- } else if (field.equals("capacity")) {
- sortField = FIELD_CAPACITY;
- } else if (field.equals("used")) {
- sortField = FIELD_USED;
- } else if (field.equals("nondfsused")) {
- sortField = FIELD_NONDFS_USED;
- } else if (field.equals("remaining")) {
- sortField = FIELD_REMAINING;
- } else if (field.equals("pcused")) {
- sortField = FIELD_PERCENT_USED;
- } else if (field.equals("pcremaining")) {
- sortField = FIELD_PERCENT_REMAINING;
- } else if (field.equals("blocks")) {
- sortField = FIELD_BLOCKS;
- } else if (field.equals("adminstate")) {
- sortField = FIELD_ADMIN_STATE;
- } else if (field.equals("decommissioned")) {
- sortField = FIELD_DECOMMISSIONED;
- } else if (field.equals("bpused")) {
- sortField = FIELD_BLOCKPOOL_USED;
- } else if (field.equals("pcbpused")) {
- sortField = FIELD_PERBLOCKPOOL_USED;
- } else if (field.equals("volfails")) {
- sortField = FIELD_FAILED_VOLUMES;
- } else {
- sortField = FIELD_NAME;
- }
-
- if (order.equals("DSC")) {
- sortOrder = SORT_ORDER_DSC;
- } else {
- sortOrder = SORT_ORDER_ASC;
- }
- }
-
- @Override
- public int compare(DatanodeDescriptor d1,
- DatanodeDescriptor d2) {
- int ret = 0;
- switch (sortField) {
- case FIELD_LAST_CONTACT:
- ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
- break;
- case FIELD_CAPACITY:
- long dlong = d1.getCapacity() - d2.getCapacity();
- ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
- break;
- case FIELD_USED:
- dlong = d1.getDfsUsed() - d2.getDfsUsed();
- ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
- break;
- case FIELD_NONDFS_USED:
- dlong = d1.getNonDfsUsed() - d2.getNonDfsUsed();
- ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
- break;
- case FIELD_REMAINING:
- dlong = d1.getRemaining() - d2.getRemaining();
- ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
- break;
- case FIELD_PERCENT_USED:
- double ddbl =((d1.getDfsUsedPercent())-
- (d2.getDfsUsedPercent()));
- ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
- break;
- case FIELD_PERCENT_REMAINING:
- ddbl =((d1.getRemainingPercent())-
- (d2.getRemainingPercent()));
- ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
- break;
- case FIELD_BLOCKS:
- ret = d1.numBlocks() - d2.numBlocks();
- break;
- case FIELD_ADMIN_STATE:
- ret = d1.getAdminState().toString().compareTo(
- d2.getAdminState().toString());
- break;
- case FIELD_DECOMMISSIONED:
- ret = DFSUtil.DECOM_COMPARATOR.compare(d1, d2);
- break;
- case FIELD_NAME:
- ret = d1.getHostName().compareTo(d2.getHostName());
- break;
- case FIELD_BLOCKPOOL_USED:
- dlong = d1.getBlockPoolUsed() - d2.getBlockPoolUsed();
- ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
- break;
- case FIELD_PERBLOCKPOOL_USED:
- ddbl = d1.getBlockPoolUsedPercent() - d2.getBlockPoolUsedPercent();
- ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
- break;
- case FIELD_FAILED_VOLUMES:
- int dint = d1.getVolumeFailures() - d2.getVolumeFailures();
- ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0);
- break;
- default:
- throw new IllegalArgumentException("Invalid sortField");
- }
- return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
- }
- }
-
- Collections.sort(nodes, new NodeComapare(field, order));
- }
-
- public static void printPathWithLinks(String dir, JspWriter out,
- int namenodeInfoPort,
- String tokenString,
- String nnAddress
- ) throws IOException {
- try {
- String[] parts = dir.split(Path.SEPARATOR);
- StringBuilder tempPath = new StringBuilder(dir.length());
- out.print("<a href=\"browseDirectory.jsp" + "?dir="+ Path.SEPARATOR
- + "&namenodeInfoPort=" + namenodeInfoPort
- + getDelegationTokenUrlParam(tokenString)
- + getUrlParam(NAMENODE_ADDRESS, nnAddress) + "\">" + Path.SEPARATOR
- + "</a>");
- tempPath.append(Path.SEPARATOR);
- for (int i = 0; i < parts.length-1; i++) {
- if (!parts[i].equals("")) {
- tempPath.append(parts[i]);
- out.print("<a href=\"browseDirectory.jsp" + "?dir="
- + HtmlQuoting.quoteHtmlChars(tempPath.toString()) + "&namenodeInfoPort=" + namenodeInfoPort
- + getDelegationTokenUrlParam(tokenString)
- + getUrlParam(NAMENODE_ADDRESS, nnAddress));
- out.print("\">" + HtmlQuoting.quoteHtmlChars(parts[i]) + "</a>" + Path.SEPARATOR);
- tempPath.append(Path.SEPARATOR);
- }
- }
- if(parts.length > 0) {
- out.print(HtmlQuoting.quoteHtmlChars(parts[parts.length-1]));
- }
- }
- catch (UnsupportedEncodingException ex) {
- ex.printStackTrace();
- }
- }
-
- public static void printGotoForm(JspWriter out,
- int namenodeInfoPort,
- String tokenString,
- String file,
- String nnAddress) throws IOException {
- out.print("<form action=\"browseDirectory.jsp\" method=\"get\" name=\"goto\">");
- out.print("Goto : ");
- out.print("<input name=\"dir\" type=\"text\" width=\"50\" id=\"dir\" value=\""+ HtmlQuoting.quoteHtmlChars(file)+"\"/>");
- out.print("<input name=\"go\" type=\"submit\" value=\"go\"/>");
- out.print("<input name=\"namenodeInfoPort\" type=\"hidden\" "
- + "value=\"" + namenodeInfoPort + "\"/>");
- if (UserGroupInformation.isSecurityEnabled()) {
- out.print("<input name=\"" + DELEGATION_PARAMETER_NAME
- + "\" type=\"hidden\" value=\"" + tokenString + "\"/>");
- }
- out.print("<input name=\""+ NAMENODE_ADDRESS +"\" type=\"hidden\" "
- + "value=\"" + nnAddress + "\"/>");
- out.print("</form>");
- }
-
- public static void createTitle(JspWriter out,
- HttpServletRequest req,
- String file) throws IOException{
- if(file == null) file = "";
- int start = Math.max(0,file.length() - 100);
- if(start != 0)
- file = "..." + file.substring(start, file.length());
- out.print("<title>HDFS:" + file + "</title>");
- }
-
- /** Convert a String to chunk-size-to-view. */
- public static int string2ChunkSizeToView(String s, int defaultValue) {
- int n = s == null? 0: Integer.parseInt(s);
- return n > 0? n: defaultValue;
- }
-
- /** Return a table containing version information. */
- public static String getVersionTable() {
- return "<div class='dfstable'><table>"
- + "\n <tr><td class='col1'>Version:</td><td>" + VersionInfo.getVersion() + ", " + VersionInfo.getRevision() + "</td></tr>"
- + "\n <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch() + "</td></tr>"
- + "\n</table></div>";
- }
-
- /**
- * Validate filename.
- * @return null if the filename is invalid.
- * Otherwise, return the validated filename.
- */
- public static String validatePath(String p) {
- return p == null || p.length() == 0?
- null: new Path(p).toUri().getPath();
- }
-
- /**
- * Validate a long value.
- * @return null if the value is invalid.
- * Otherwise, return the validated Long object.
- */
- public static Long validateLong(String value) {
- return value == null? null: Long.parseLong(value);
- }
-
- /**
- * Validate a URL.
- * @return null if the value is invalid.
- * Otherwise, return the validated URL String.
- */
- public static String validateURL(String value) {
- try {
- return URLEncoder.encode(new URL(value).toString(), "UTF-8");
- } catch (IOException e) {
- return null;
- }
- }
-
- /**
- * If security is turned off, what is the default web user?
- * @param conf the configuration to look in
- * @return the remote user that was configuration
- */
- public static UserGroupInformation getDefaultWebUser(Configuration conf
- ) throws IOException {
- return UserGroupInformation.createRemoteUser(getDefaultWebUserName(conf));
- }
+ private JspHelper() {}
private static String getDefaultWebUserName(Configuration conf
) throws IOException {
@@ -736,56 +271,4 @@ public class JspHelper {
return username;
}
- /**
- * Returns the url parameter for the given token string.
- * @param tokenString
- * @return url parameter
- */
- public static String getDelegationTokenUrlParam(String tokenString) {
- if (tokenString == null ) {
- return "";
- }
- if (UserGroupInformation.isSecurityEnabled()) {
- return SET_DELEGATION + tokenString;
- } else {
- return "";
- }
- }
-
- /**
- * Returns the url parameter for the given string, prefixed with
- * paramSeparator.
- *
- * @param name parameter name
- * @param val parameter value
- * @param paramSeparator URL parameter prefix, i.e. either '?' or '&'
- * @return url parameter
- */
- public static String getUrlParam(String name, String val, String paramSeparator) {
- return val == null ? "" : paramSeparator + name + "=" + val;
- }
-
- /**
- * Returns the url parameter for the given string, prefixed with '?' if
- * firstParam is true, prefixed with '&' if firstParam is false.
- *
- * @param name parameter name
- * @param val parameter value
- * @param firstParam true if this is the first parameter in the list, false otherwise
- * @return url parameter
- */
- public static String getUrlParam(String name, String val, boolean firstParam) {
- return getUrlParam(name, val, firstParam ? "?" : "&");
- }
-
- /**
- * Returns the url parameter for the given string, prefixed with '&'.
- *
- * @param name parameter name
- * @param val parameter value
- * @return url parameter
- */
- public static String getUrlParam(String name, String val) {
- return getUrlParam(name, val, false);
- }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Thu May 1 18:46:18 2014
@@ -66,6 +66,7 @@ public class TestDecommission {
static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
final Random myrand = new Random();
+ Path dir;
Path hostsFile;
Path excludeFile;
FileSystem localFileSys;
@@ -78,7 +79,7 @@ public class TestDecommission {
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
- Path dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
+ dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
hostsFile = new Path(dir, "hosts");
excludeFile = new Path(dir, "exclude");
@@ -98,7 +99,7 @@ public class TestDecommission {
@After
public void teardown() throws IOException {
- cleanupFile(localFileSys, excludeFile.getParent());
+ cleanupFile(localFileSys, dir);
if (cluster != null) {
cluster.shutdown();
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Thu May 1 18:46:18 2014
@@ -17,13 +17,6 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.URL;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -32,8 +25,16 @@ import org.apache.hadoop.fs.FSDataInputS
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.junit.Assert;
import org.junit.Test;
+import javax.management.*;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
/**
* The test makes sure that NameNode detects the presence of blocks that do not have
* any valid replicas. In addition, it verifies that HDFS front page displays
@@ -45,8 +46,11 @@ public class TestMissingBlocksAlert {
LogFactory.getLog(TestMissingBlocksAlert.class);
@Test
- public void testMissingBlocksAlert() throws IOException,
- InterruptedException {
+ public void testMissingBlocksAlert()
+ throws IOException, InterruptedException,
+ MalformedObjectNameException, AttributeNotFoundException,
+ MBeanException, ReflectionException,
+ InstanceNotFoundException {
MiniDFSCluster cluster = null;
@@ -94,14 +98,11 @@ public class TestMissingBlocksAlert {
assertEquals(4, dfs.getUnderReplicatedBlocksCount());
assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());
-
- // Now verify that it shows up on webui
- URL url = new URL("http://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY) +
- "/dfshealth.jsp");
- String dfsFrontPage = DFSTestUtil.urlGet(url);
- String warnStr = "WARNING : There are ";
- assertTrue("HDFS Front page does not contain expected warning",
- dfsFrontPage.contains(warnStr + "1 missing blocks"));
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=NameNode,name=NameNodeInfo");
+ Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+ "NumberOfMissingBlocks"));
// now do the reverse : remove the file expect the number of missing
// blocks to go to zero
@@ -116,11 +117,8 @@ public class TestMissingBlocksAlert {
assertEquals(2, dfs.getUnderReplicatedBlocksCount());
assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
- // and make sure WARNING disappears
- // Now verify that it shows up on webui
- dfsFrontPage = DFSTestUtil.urlGet(url);
- assertFalse("HDFS Front page contains unexpected warning",
- dfsFrontPage.contains(warnStr));
+ Assert.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
+ "NumberOfMissingBlocks"));
} finally {
if (cluster != null) {
cluster.shutdown();
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java Thu May 1 18:46:18 2014
@@ -21,16 +21,12 @@ import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
-import java.net.URL;
-import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -41,6 +37,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+
public class TestNNWithQJM {
final Configuration conf = new HdfsConfiguration();
private MiniJournalCluster mjc = null;
@@ -204,55 +201,4 @@ public class TestNNWithQJM {
"Unable to start log segment 1: too few journals", ioe);
}
}
-
- @Test (timeout = 30000)
- public void testWebPageHasQjmInfo() throws Exception {
- conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
- MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
- conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
- mjc.getQuorumJournalURI("myjournal").toString());
- // Speed up the test
- conf.setInt(
- CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
-
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .manageNameDfsDirs(false)
- .build();
- try {
- URL url = new URL("http://localhost:"
- + NameNode.getHttpAddress(cluster.getConfiguration(0)).getPort()
- + "/dfshealth.jsp");
-
- cluster.getFileSystem().mkdirs(TEST_PATH);
-
- String contents = DFSTestUtil.urlGet(url);
- assertTrue(contents.contains("QJM to ["));
- assertTrue(contents.contains("Written txid 2"));
-
- // Stop one JN, do another txn, and make sure it shows as behind
- // stuck behind the others.
- mjc.getJournalNode(0).stopAndJoin(0);
-
- cluster.getFileSystem().delete(TEST_PATH, true);
-
- contents = DFSTestUtil.urlGet(url);
- System.out.println(contents);
- assertTrue(Pattern.compile("1 txns/\\d+ms behind").matcher(contents)
- .find());
-
- // Restart NN while JN0 is still down.
- cluster.restartNameNode();
-
- contents = DFSTestUtil.urlGet(url);
- System.out.println(contents);
- assertTrue(Pattern.compile("never written").matcher(contents)
- .find());
-
-
- } finally {
- cluster.shutdown();
- }
-
- }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java Thu May 1 18:46:18 2014
@@ -170,11 +170,6 @@ public class TestJournalNode {
assertTrue("Bad contents: " + pageContents,
pageContents.contains(
"Hadoop:service=JournalNode,name=JvmMetrics"));
-
- // Check JSP page.
- pageContents = DFSTestUtil.urlGet(
- new URL(urlRoot + "/journalstatus.jsp"));
- assertTrue(pageContents.contains("JournalNode"));
// Create some edits on server side
byte[] EDITS_DATA = QJMTestUtil.createTxnData(1, 3);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Thu May 1 18:46:18 2014
@@ -158,25 +158,6 @@ public class TestJspHelper {
.next();
Assert.assertEquals(expected, tokenInUgi.getService().toString());
}
-
-
- @Test
- public void testDelegationTokenUrlParam() {
- conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- String tokenString = "xyzabc";
- String delegationTokenParam = JspHelper
- .getDelegationTokenUrlParam(tokenString);
- //Security is enabled
- Assert.assertEquals(JspHelper.SET_DELEGATION + "xyzabc",
- delegationTokenParam);
- conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
- UserGroupInformation.setConfiguration(conf);
- delegationTokenParam = JspHelper
- .getDelegationTokenUrlParam(tokenString);
- //Empty string must be returned because security is disabled.
- Assert.assertEquals("", delegationTokenParam);
- }
@Test
public void testGetUgiFromToken() throws IOException {
@@ -403,32 +384,6 @@ public class TestJspHelper {
}
}
- @Test
- public void testPrintGotoFormWritesValidXML() throws IOException,
- ParserConfigurationException, SAXException {
- JspWriter mockJspWriter = mock(JspWriter.class);
- ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
- doAnswer(new Answer<Object>() {
- @Override
- public Object answer(InvocationOnMock invok) {
- Object[] args = invok.getArguments();
- jspWriterOutput += (String) args[0];
- return null;
- }
- }).when(mockJspWriter).print(arg.capture());
-
- jspWriterOutput = "";
-
- JspHelper.printGotoForm(mockJspWriter, 424242, "a token string",
- "foobar/file", "0.0.0.0");
-
- DocumentBuilder parser =
- DocumentBuilderFactory.newInstance().newDocumentBuilder();
- InputSource is = new InputSource();
- is.setCharacterStream(new StringReader(jspWriterOutput));
- parser.parse(is);
- }
-
private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) {
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getParameter(UserParam.NAME)).thenReturn(user);
@@ -464,146 +419,6 @@ public class TestJspHelper {
}
@Test
- public void testSortNodeByFields() throws Exception {
- DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "datanode1",
- 1234, 2345, 3456, 4567);
- DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "datanode2",
- 1235, 2346, 3457, 4568);
-
- // Setup DatanodeDescriptors with one storage each.
- DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1");
- DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2");
-
- // Update the DatanodeDescriptors with their attached storages.
- BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1"));
- BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2"));
-
- DatanodeStorage dns1 = new DatanodeStorage("dnStorage1");
- DatanodeStorage dns2 = new DatanodeStorage("dnStorage2");
-
- StorageReport[] report1 = new StorageReport[] {
- new StorageReport(dns1, false, 1024, 100, 924, 100)
- };
- StorageReport[] report2 = new StorageReport[] {
- new StorageReport(dns2, false, 2500, 200, 1848, 200)
- };
- dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2);
- dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1);
-
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- live.add(dnDesc1);
- live.add(dnDesc2);
-
- // Test sorting by failed volumes
- JspHelper.sortNodeList(live, "volfails", "ASC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
- JspHelper.sortNodeList(live, "volfails", "DSC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- // Test sorting by Blockpool used
- JspHelper.sortNodeList(live, "bpused", "ASC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
- JspHelper.sortNodeList(live, "bpused", "DSC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
-
- // Test sorting by Percentage Blockpool used
- JspHelper.sortNodeList(live, "pcbpused", "ASC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
- JspHelper.sortNodeList(live, "pcbpused", "DSC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- //unexisted field comparition is d1.getHostName().compareTo(d2.getHostName());
- JspHelper.sortNodeList(live, "unexists", "ASC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- JspHelper.sortNodeList(live, "unexists", "DSC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
-
- // test sorting by capacity
- JspHelper.sortNodeList(live, "capacity", "ASC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- JspHelper.sortNodeList(live, "capacity", "DSC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
-
- // test sorting by used
- JspHelper.sortNodeList(live, "used", "ASC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- JspHelper.sortNodeList(live, "used", "DSC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
-
- // test sorting by nondfsused
- JspHelper.sortNodeList(live, "nondfsused", "ASC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- JspHelper.sortNodeList(live, "nondfsused", "DSC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
-
- // test sorting by remaining
- JspHelper.sortNodeList(live, "remaining", "ASC");
- Assert.assertEquals(dnDesc1, live.get(0));
- Assert.assertEquals(dnDesc2, live.get(1));
-
- JspHelper.sortNodeList(live, "remaining", "DSC");
- Assert.assertEquals(dnDesc2, live.get(0));
- Assert.assertEquals(dnDesc1, live.get(1));
- }
-
- @Test
- public void testPrintMethods() throws IOException {
- JspWriter out = mock(JspWriter.class);
- HttpServletRequest req = mock(HttpServletRequest.class);
-
- final StringBuffer buffer = new StringBuffer();
-
- ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
- doAnswer(new Answer<Object>() {
- @Override
- public Object answer(InvocationOnMock invok) {
- Object[] args = invok.getArguments();
- buffer.append((String)args[0]);
- return null;
- }
- }).when(out).print(arg.capture());
-
-
- JspHelper.createTitle(out, req, "testfile.txt");
- Mockito.verify(out, Mockito.times(1)).print(Mockito.anyString());
-
- JspHelper.addTableHeader(out);
- Mockito.verify(out, Mockito.times(1 + 2)).print(Mockito.anyString());
-
- JspHelper.addTableRow(out, new String[] {" row11", "row12 "});
- Mockito.verify(out, Mockito.times(1 + 2 + 4)).print(Mockito.anyString());
-
- JspHelper.addTableRow(out, new String[] {" row11", "row12 "}, 3);
- Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4)).print(Mockito.anyString());
-
- JspHelper.addTableRow(out, new String[] {" row21", "row22"});
- Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4)).print(Mockito.anyString());
-
- JspHelper.addTableFooter(out);
- Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4 + 1)).print(Mockito.anyString());
-
- assertFalse(Strings.isNullOrEmpty(buffer.toString()));
- }
-
- @Test
public void testReadWriteReplicaState() {
try {
DataOutputBuffer out = new DataOutputBuffer();
@@ -622,21 +437,6 @@ public class TestJspHelper {
fail("testReadWrite ex error ReplicaState");
}
}
-
- @Test
- public void testAuthority(){
- DatanodeID dnWithIp = new DatanodeID("127.0.0.1", "hostName", null,
- 50020, 50075, 50076, 50010);
- assertNotNull(JspHelper.Url.authority("http", dnWithIp));
-
- DatanodeID dnWithNullIp = new DatanodeID(null, "hostName", null,
- 50020, 50075, 50076, 50010);
- assertNotNull(JspHelper.Url.authority("http", dnWithNullIp));
-
- DatanodeID dnWithEmptyIp = new DatanodeID("", "hostName", null,
- 50020, 50075, 50076, 50010);
- assertNotNull(JspHelper.Url.authority("http", dnWithEmptyIp));
- }
private static String clientAddr = "1.1.1.1";
private static String chainedClientAddr = clientAddr+", 2.2.2.2";
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Thu May 1 18:46:18 2014
@@ -19,10 +19,8 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertTrue;
-import java.net.InetSocketAddress;
-import java.net.URL;
+import java.lang.management.ManagementFactory;
-import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
/**
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
*
@@ -73,7 +74,7 @@ public class TestHostsFiles {
}
@Test
- public void testHostsExcludeDfshealthJsp() throws Exception {
+ public void testHostsExcludeInUI() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
@@ -117,17 +118,13 @@ public class TestHostsFiles {
// Check the block still has sufficient # replicas across racks
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
-
- InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
- LOG.info("nnaddr = '" + nnHttpAddress + "'");
- String nnHostName = nnHttpAddress.getHostName();
- URL nnjsp = new URL("http://" + nnHostName + ":" + nnHttpAddress.getPort() + "/dfshealth.jsp");
- LOG.info("fetching " + nnjsp);
- String dfshealthPage = StringEscapeUtils.unescapeHtml(DFSTestUtil.urlGet(nnjsp));
- LOG.info("got " + dfshealthPage);
- assertTrue("dfshealth should contain " + nnHostName + ", got:" + dfshealthPage,
- dfshealthPage.contains(nnHostName));
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=NameNode,name=NameNodeInfo");
+ String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
+ assertTrue("Live nodes should contain the decommissioned node",
+ nodes.contains("Decommissioned"));
} finally {
cluster.shutdown();
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java?rev=1591732&r1=1591731&r2=1591732&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java Thu May 1 18:46:18 2014
@@ -78,18 +78,4 @@ public class TestSecondaryWebUi {
Assert.assertArrayEquals(checkpointEditlogDir,
snn.getCheckpointEditlogDirectories());
}
-
- @Test
- public void testSecondaryWebUiJsp()
- throws IOException, MalformedObjectNameException,
- AttributeNotFoundException, MBeanException,
- ReflectionException, InstanceNotFoundException {
- String pageContents = DFSTestUtil.urlGet(new URL("http://localhost:" +
- SecondaryNameNode.getHttpAddress(conf).getPort() + "/status.jsp"));
- Assert.assertTrue("Didn't find \"Last Checkpoint\"",
- pageContents.contains("Last Checkpoint"));
- Assert.assertTrue("Didn't find Checkpoint Transactions: 500",
- pageContents.contains("Checkpoint Transactions: 500"));
-
- }
}