Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/06/20 16:53:47 UTC

[1/4] hadoop git commit: YARN-5246. NMWebAppFilter web redirects drop query parameters. Contributed by Varun Vasudev.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 91d62aaf1 -> 3a0a329c4


YARN-5246. NMWebAppFilter web redirects drop query parameters. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0162f20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0162f20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0162f20

Branch: refs/heads/HDFS-1312
Commit: d0162f2040a4d98fbac51527dfc9447888cb32ab
Parents: 0319d73
Author: Junping Du <ju...@apache.org>
Authored: Sun Jun 19 17:44:54 2016 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Sun Jun 19 17:44:54 2016 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/webapp/util/WebAppUtils.java    | 51 ++++++++++++++++++++
 .../yarn/webapp/util/TestWebAppUtils.java       | 42 ++++++++++++++++
 .../nodemanager/webapp/NMWebAppFilter.java      | 10 ++--
 .../resourcemanager/webapp/RMWebAppFilter.java  | 24 ++-------
 4 files changed, 103 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0162f20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 6245541..f3af2bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -30,6 +31,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
@@ -42,6 +44,10 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RMHAUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
+
+import javax.servlet.http.HttpServletRequest;
 
 @Private
 @Evolving
@@ -418,4 +424,49 @@ public class WebAppUtils {
   public static List<String> listSupportedLogContentType() {
     return Arrays.asList("text", "octet-stream");
   }
+
+  private static String getURLEncodedQueryString(HttpServletRequest request) {
+    String queryString = request.getQueryString();
+    if (queryString != null && !queryString.isEmpty()) {
+      String reqEncoding = request.getCharacterEncoding();
+      if (reqEncoding == null || reqEncoding.isEmpty()) {
+        reqEncoding = "ISO-8859-1";
+      }
+      Charset encoding = Charset.forName(reqEncoding);
+      List<NameValuePair> params = URLEncodedUtils.parse(queryString, encoding);
+      return URLEncodedUtils.format(params, encoding);
+    }
+    return null;
+  }
+
+  /**
+   * Get an HTML-escaped URI with the query parameters of the request.
+   * @param request HttpServletRequest with the request details
+   * @return HTML-escaped URI with the query parameters
+   */
+  public static String getHtmlEscapedURIWithQueryString(
+      HttpServletRequest request) {
+    String urlEncodedQueryString = getURLEncodedQueryString(request);
+    if (urlEncodedQueryString != null) {
+      return HtmlQuoting.quoteHtmlChars(
+          request.getRequestURI() + "?" + urlEncodedQueryString);
+    }
+    return HtmlQuoting.quoteHtmlChars(request.getRequestURI());
+  }
+
+  /**
+   * Add the query params from an HttpServletRequest to the target URI passed.
+   * @param request HttpServletRequest with the request details
+   * @param targetUri the URI to which the query params must be added
+   * @return URL-encoded string containing the targetUri + "?" + query string
+   */
+  public static String appendQueryParams(HttpServletRequest request,
+      String targetUri) {
+    String ret = targetUri;
+    String urlEncodedQueryString = getURLEncodedQueryString(request);
+    if (urlEncodedQueryString != null) {
+      ret += "?" + urlEncodedQueryString;
+    }
+    return ret;
+  }
 }
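
For reference, a minimal sketch of how the two new helpers behave. It mirrors
the unit tests added below; the class name, URIs, parameters, and use of
Mockito are illustrative only:

  import javax.servlet.http.HttpServletRequest;
  import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
  import org.mockito.Mockito;

  public class QueryParamsSketch {
    public static void main(String[] args) {
      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
      Mockito.when(request.getRequestURI()).thenReturn("/test/path");
      // No character encoding on the request: the helpers fall back to ISO-8859-1.
      Mockito.when(request.getCharacterEncoding()).thenReturn(null);
      Mockito.when(request.getQueryString()).thenReturn("param1=x&param2=y");

      // Re-encodes the query string and appends it to the target URI:
      // prints "/redirect/target?param1=x&param2=y"
      System.out.println(WebAppUtils.appendQueryParams(request, "/redirect/target"));

      // HTML-escapes the request URI plus query string, so '&' becomes '&amp;':
      // prints "/test/path?param1=x&amp;param2=y"
      System.out.println(WebAppUtils.getHtmlEscapedURIWithQueryString(request));
    }
  }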

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0162f20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
index dcc8ba4..d6f78b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
@@ -39,6 +39,9 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
+
+import javax.servlet.http.HttpServletRequest;
 
 public class TestWebAppUtils {
   private static final String RM1_NODE_ID = "rm1";
@@ -176,6 +179,45 @@ public class TestWebAppUtils {
     return conf;
   }
 
+  @Test
+  public void testAppendQueryParams() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    String targetUri = "/test/path";
+    Mockito.when(request.getCharacterEncoding()).thenReturn(null);
+    Map<String, String> paramResultMap = new HashMap<>();
+    paramResultMap.put("param1=x", targetUri + "?" + "param1=x");
+    paramResultMap
+        .put("param1=x&param2=y", targetUri + "?" + "param1=x&param2=y");
+    paramResultMap.put("param1=x&param2=y&param3=x+y",
+        targetUri + "?" + "param1=x&param2=y&param3=x+y");
+
+    for (Map.Entry<String, String> entry : paramResultMap.entrySet()) {
+      Mockito.when(request.getQueryString()).thenReturn(entry.getKey());
+      String uri = WebAppUtils.appendQueryParams(request, targetUri);
+      Assert.assertEquals(entry.getValue(), uri);
+    }
+  }
+
+  @Test
+  public void testGetHtmlEscapedURIWithQueryString() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    String targetUri = "/test/path";
+    Mockito.when(request.getCharacterEncoding()).thenReturn(null);
+    Mockito.when(request.getRequestURI()).thenReturn(targetUri);
+    Map<String, String> paramResultMap = new HashMap<>();
+    paramResultMap.put("param1=x", targetUri + "?" + "param1=x");
+    paramResultMap
+        .put("param1=x&param2=y", targetUri + "?" + "param1=x&amp;param2=y");
+    paramResultMap.put("param1=x&param2=y&param3=x+y",
+        targetUri + "?" + "param1=x&amp;param2=y&amp;param3=x+y");
+
+    for (Map.Entry<String, String> entry : paramResultMap.entrySet()) {
+      Mockito.when(request.getQueryString()).thenReturn(entry.getKey());
+      String uri = WebAppUtils.getHtmlEscapedURIWithQueryString(request);
+      Assert.assertEquals(entry.getValue(), uri);
+    }
+  }
+
   public class TestBuilder extends HttpServer2.Builder {
     public String keypass;
     public String keystorePassword;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0162f20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
index 63fe6ea..d2f5849 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
 import com.google.inject.Injector;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 @Singleton
 public class NMWebAppFilter extends GuiceContainer{
@@ -58,8 +59,7 @@ public class NMWebAppFilter extends GuiceContainer{
   public void doFilter(HttpServletRequest request,
       HttpServletResponse response, FilterChain chain) throws IOException,
       ServletException {
-    String uri = HtmlQuoting.quoteHtmlChars(request.getRequestURI());
-    String redirectPath = containerLogPageRedirectPath(uri);
+    String redirectPath = containerLogPageRedirectPath(request);
     if (redirectPath != null) {
       String redirectMsg =
           "Redirecting to log server" + " : " + redirectPath;
@@ -72,7 +72,8 @@ public class NMWebAppFilter extends GuiceContainer{
     super.doFilter(request, response, chain);
   }
 
-  private String containerLogPageRedirectPath(String uri) {
+  private String containerLogPageRedirectPath(HttpServletRequest request) {
+    String uri = HtmlQuoting.quoteHtmlChars(request.getRequestURI());
     String redirectPath = null;
     if (!uri.contains("/ws/v1/node") && uri.contains("/containerlogs")) {
       String[] parts = uri.split("/");
@@ -105,7 +106,8 @@ public class NMWebAppFilter extends GuiceContainer{
             sb.append(containerIdStr);
             sb.append("/");
             sb.append(appOwner);
-            redirectPath = sb.toString();
+            redirectPath =
+                WebAppUtils.appendQueryParams(request, sb.toString());
           } else {
             injector.getInstance(RequestContext.class).set(
               ContainerLogsPage.REDIRECT_URL, "false");
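
For illustration (the container ID, user, and URI pattern here are
hypothetical): a request such as
/node/containerlogs/container_1466000000000_0001_01_000001/hadoop?start=-4096
previously redirected to the log server with "?start=-4096" dropped; with this
change the query string is re-encoded via WebAppUtils.appendQueryParams and
carried along on the redirect path.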

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0162f20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
index 0f1a590..1e4caba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
@@ -25,8 +25,6 @@ import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.nio.charset.Charset;
-import java.util.List;
 import java.util.Random;
 import java.util.Set;
 
@@ -50,8 +48,6 @@ import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.http.NameValuePair;
-import org.apache.http.client.utils.URLEncodedUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -116,22 +112,10 @@ public class RMWebAppFilter extends GuiceContainer {
       htmlEscapedUri = "/";
     }
 
-    String uriWithQueryString = htmlEscapedUri;
-    String htmlEscapedUriWithQueryString = htmlEscapedUri;
-
-    String queryString = request.getQueryString();
-    if (queryString != null && !queryString.isEmpty()) {
-      String reqEncoding = request.getCharacterEncoding();
-      if (reqEncoding == null || reqEncoding.isEmpty()) {
-        reqEncoding = "ISO-8859-1";
-      }
-      Charset encoding = Charset.forName(reqEncoding);
-      List<NameValuePair> params = URLEncodedUtils.parse(queryString, encoding);
-      String urlEncodedQueryString = URLEncodedUtils.format(params, encoding);
-      uriWithQueryString += "?" + urlEncodedQueryString;
-      htmlEscapedUriWithQueryString = HtmlQuoting.quoteHtmlChars(
-          request.getRequestURI() + "?" + urlEncodedQueryString);
-    }
+    String uriWithQueryString =
+        WebAppUtils.appendQueryParams(request, htmlEscapedUri);
+    String htmlEscapedUriWithQueryString =
+        WebAppUtils.getHtmlEscapedURIWithQueryString(request);
 
     RMWebApp rmWebApp = injector.getInstance(RMWebApp.class);
     rmWebApp.checkIfStandbyRM();




[2/4] hadoop git commit: HADOOP-13192. org.apache.hadoop.util.LineReader cannot handle multibyte delimiters correctly. Contributed by binde.

Posted by ae...@apache.org.
HADOOP-13192. org.apache.hadoop.util.LineReader cannot handle multibyte delimiters correctly. Contributed by binde.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc6b50cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc6b50cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc6b50cc

Branch: refs/heads/HDFS-1312
Commit: fc6b50cc574e144fd314dea6c11987c6a384bfa6
Parents: d0162f2
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jun 20 17:07:26 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jun 20 17:07:26 2016 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/LineReader.java |  5 +-
 .../org/apache/hadoop/util/TestLineReader.java  | 59 ++++++++++++--------
 2 files changed, 41 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6b50cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
index 153953d..e20a7c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
@@ -318,7 +318,10 @@ public class LineReader implements Closeable {
             break;
           }
         } else if (delPosn != 0) {
-          bufferPosn--;
+          bufferPosn -= delPosn;
+          if (bufferPosn < -1) {
+            bufferPosn = -1;
+          }
           delPosn = 0;
         }
       }
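
In effect, when a partial delimiter match fails, the reader now backs up by
the full length of the partial match instead of a single byte, so overlapping
occurrences of a multibyte delimiter are found. A minimal sketch, mirroring
the new Test 3 added below (the input and delimiter come from that test; the
class name is illustrative):

  import java.io.ByteArrayInputStream;
  import java.io.IOException;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.util.LineReader;

  public class MultibyteDelimiterSketch {
    public static void main(String[] args) throws IOException {
      // "aaaabccc" split on "aaab": a partial match starting at offset 0 fails
      // at the fourth byte ('a' != 'b'); backing up by the partial-match
      // length lets the reader find the real delimiter at offset 1.
      LineReader reader = new LineReader(
          new ByteArrayInputStream("aaaabccc".getBytes()), "aaab".getBytes());
      Text line = new Text();
      reader.readLine(line);  // line is "a"   (text before the delimiter)
      reader.readLine(line);  // line is "ccc" (text after the delimiter)
      reader.close();
    }
  }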

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc6b50cc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java
index 9d909bc..52f8b9f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java
@@ -58,7 +58,7 @@ public class TestLineReader {
      * Check Condition
      *  In the second key value pair, the value should contain 
      *  "</"  from currentToken and
-     *  "id>" from next token 
+     *  "id>" from next token
      */  
     
     Delimiter="</entity>"; 
@@ -80,20 +80,21 @@ public class TestLineReader {
     String TestPartOfInput = CurrentBufferTailToken+NextBufferHeadToken;
   
     int BufferSize=64 * 1024;
-    int numberOfCharToFillTheBuffer=BufferSize-CurrentBufferTailToken.length();
+    int numberOfCharToFillTheBuffer =
+            BufferSize - CurrentBufferTailToken.length();
     StringBuilder fillerString=new StringBuilder();
-    for (int i=0;i<numberOfCharToFillTheBuffer;i++) {  
+    for (int i=0; i<numberOfCharToFillTheBuffer; i++) {
       fillerString.append('a'); // char 'a' as a filler for the test string
     }
 
     TestData = fillerString + TestPartOfInput;
     lineReader = new LineReader(
-        new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
+        new ByteArrayInputStream(TestData.getBytes()), Delimiter.getBytes());
     
     line = new Text();
     
-    lineReader.readLine(line); 
-    Assert.assertEquals(fillerString.toString(),line.toString());
+    lineReader.readLine(line);
+    Assert.assertEquals(fillerString.toString(), line.toString());
     
     lineReader.readLine(line);
     Assert.assertEquals(Expected, line.toString());
@@ -107,35 +108,49 @@ public class TestLineReader {
     Delimiter = "record";
     StringBuilder TestStringBuilder = new StringBuilder();
     
-    TestStringBuilder.append(Delimiter+"Kerala ");
-    TestStringBuilder.append(Delimiter+"Bangalore");
-    TestStringBuilder.append(Delimiter+" North Korea");
-    TestStringBuilder.append(Delimiter+Delimiter+
+    TestStringBuilder.append(Delimiter + "Kerala ");
+    TestStringBuilder.append(Delimiter + "Bangalore");
+    TestStringBuilder.append(Delimiter + " North Korea");
+    TestStringBuilder.append(Delimiter + Delimiter+
                         "Guantanamo");
-    TestStringBuilder.append(Delimiter+"ecord"+"recor"+"core"); //~EOF with 're'
+    TestStringBuilder.append(Delimiter + "ecord"
+            + "recor" + "core"); //~EOF with 're'
     
     TestData=TestStringBuilder.toString();
     
     lineReader = new LineReader(
-        new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
-    
-    lineReader.readLine(line); 
-    Assert.assertEquals("",line.toString());
-    lineReader.readLine(line); 
-    Assert.assertEquals("Kerala ",line.toString());
+        new ByteArrayInputStream(TestData.getBytes()), Delimiter.getBytes());
+
+    lineReader.readLine(line);
+    Assert.assertEquals("", line.toString());
+    lineReader.readLine(line);
+    Assert.assertEquals("Kerala ", line.toString());
     
     lineReader.readLine(line); 
-    Assert.assertEquals("Bangalore",line.toString());
+    Assert.assertEquals("Bangalore", line.toString());
     
     lineReader.readLine(line); 
-    Assert.assertEquals(" North Korea",line.toString());
+    Assert.assertEquals(" North Korea", line.toString());
     
     lineReader.readLine(line); 
-    Assert.assertEquals("",line.toString());
+    Assert.assertEquals("", line.toString());
     lineReader.readLine(line); 
-    Assert.assertEquals("Guantanamo",line.toString());
+    Assert.assertEquals("Guantanamo", line.toString());
     
     lineReader.readLine(line); 
-    Assert.assertEquals(("ecord"+"recor"+"core"),line.toString());
+    Assert.assertEquals(("ecord"+"recor"+"core"), line.toString());
+
+    // Test 3
+    // The test scenario: split the input "aaaabccc"
+    // on the multibyte delimiter "aaab".
+    TestData = "aaaabccc";
+    Delimiter = "aaab";
+    lineReader = new LineReader(
+        new ByteArrayInputStream(TestData.getBytes()), Delimiter.getBytes());
+
+    lineReader.readLine(line);
+    Assert.assertEquals("a", line.toString());
+    lineReader.readLine(line);
+    Assert.assertEquals("ccc", line.toString());
   }
 }




[4/4] hadoop git commit: Merge branch 'trunk' into HDFS-1312

Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a0a329c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a0a329c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a0a329c

Branch: refs/heads/HDFS-1312
Commit: 3a0a329c4df7452d18371a34be867e86f3c6e310
Parents: cc5f81d fc6b50c
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jun 20 09:50:50 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jun 20 09:50:50 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/LineReader.java |  5 +-
 .../org/apache/hadoop/util/TestLineReader.java  | 59 ++++++++++++--------
 .../hadoop/yarn/webapp/util/WebAppUtils.java    | 51 +++++++++++++++++
 .../yarn/webapp/util/TestWebAppUtils.java       | 42 ++++++++++++++
 .../nodemanager/webapp/NMWebAppFilter.java      | 10 ++--
 .../resourcemanager/webapp/RMWebAppFilter.java  | 24 ++------
 6 files changed, 144 insertions(+), 47 deletions(-)
----------------------------------------------------------------------





[3/4] hadoop git commit: HDFS-10547. DiskBalancer: fix whitespace issue in doc files. Contributed by Anu Engineer.

Posted by ae...@apache.org.
HDFS-10547. DiskBalancer: fix whitespace issue in doc files. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc5f81d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc5f81d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc5f81d3

Branch: refs/heads/HDFS-1312
Commit: cc5f81d3f10f24939cbebf709f8284d9e7bd7d81
Parents: 91d62aa
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jun 20 09:48:34 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jun 20 09:48:34 2016 -0700

----------------------------------------------------------------------
 .../src/site/markdown/HDFSCommands.md           |  2 +-
 .../src/site/markdown/HDFSDiskbalancer.md       | 25 ++++++++++++--------
 2 files changed, 16 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc5f81d3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index f868118..39e8991 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -436,7 +436,7 @@ Runs a HDFS dfsadmin client.
 Usage:
 
        hdfs diskbalancer
-         [-plan <datanode> -uri <namenodeURI>]
+         [-plan <datanode> -fs <namenodeURI>]
          [-execute <planfile>]
          [-query <datanode>]
          [-cancel <planfile>]
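
For example, with illustrative hostnames, the corrected plan usage would look
like:

       hdfs diskbalancer -plan datanode1.example.org -fs hdfs://namenode.example.org:8020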

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc5f81d3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 388a4c6..522dc5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -64,11 +64,11 @@ The following sections discuss what commands are supported by disk balancer
 
 | COMMAND\_OPTION    | Description |
 |:---- |:---- |
-| `-out`|	Allows user to control the output location of the plan file.|
-| `-bandwidth`|	Since datanode is operational and might be running other jobs, diskbalancer limits the amount of data moved per second. This parameter allows user to set the maximum bandwidth to be used. This is not required to be set since diskBalancer will use the deafult bandwidth if this is not specified.|
-| `-thresholdPercentage`|	Since we operate against a snap-shot of datanode, themove operations have a tolerance percentage to declare success. If user specifies 10% and move operation is say 20GB in size, if we can move 18GB that operation is considered successful. This is to accomodate the changes in datanode in real time. This parameter is not needed and a default is used if not specified.|
-| `-maxerror` |	Max error allows users to specify how many block copy operations must fail before we abort a move step. Once again, this is not a needed parameter and a system-default is used if not specified.|
-| `-v`|	Verbose mode, specifying this parameter forces the plan command to print out a summary of the plan on stdout.|
+| `-out`| Allows user to control the output location of the plan file.|
+| `-bandwidth`| Since the datanode is operational and might be running other jobs, diskbalancer limits the amount of data moved per second. This parameter allows the user to set the maximum bandwidth to be used. This is not required to be set since diskBalancer will use the default bandwidth if this is not specified.|
+| `-thresholdPercentage`| Since we operate against a snapshot of the datanode, the move operations have a tolerance percentage to declare success. If the user specifies 10% and a move operation is, say, 20GB in size, then moving 18GB is considered successful. This accommodates changes in the datanode in real time. This parameter is not required and a default is used if not specified.|
+| `-maxerror` | Max error allows users to specify how many block copy operations must fail before we abort a move step. Once again, this is not a needed parameter and a system-default is used if not specified.|
+| `-v`| Verbose mode, specifying this parameter forces the plan command to print out a summary of the plan on stdout.|
 
 The plan command writes two output files. They are `<nodename>.before.json` which
 captures the state of the datanode before the diskbalancer is run, and `<nodename>.plan.json`.
@@ -89,7 +89,7 @@ Query command gets the current status of the diskbalancer from a datanode.
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-|`-v` |	Verbose mode, Prints out status of individual moves|
+|`-v` | Verbose mode, Prints out status of individual moves|
 
 
 ### Cancel
@@ -103,6 +103,11 @@ or
 
 Plan ID can be read from datanode using query command.
 
+### Report
+The report command provides a detailed report about a node.
+
+`hdfs diskbalancer -fs http://namenode.uri -report -node {DataNodeID | IP | Hostname}`
+
 
 Settings
 --------
@@ -111,7 +116,7 @@ There is a set of diskbalancer settings that can be controlled via hdfs-site.xml
 
 | Setting | Description |
 |:---- |:---- |
-|`dfs.disk.balancer.enabled`|	This parameter controls if diskbalancer is enabled for a cluster. if this is not enabled, any execute command will be rejected by the datanode.The default value is false.|
-|`dfs.disk.balancer.max.disk.throughputInMBperSec` |	This controls the maximum disk bandwidth consumed by diskbalancer while copying data. If a value like 10MB is specified then diskbalancer on the average will only copy 10MB/S. The default value is 10MB/S.|
-|`dfs.disk.balancer.max.disk.errors`|	sets the value of maximum number of errors we can ignore for a specific move between two disks before it is abandoned. For example, if a plan has 3 pair of disks to copy between , and the first disk set encounters more than 5 errors, then we abandon the first copy and start the second copy in the plan. The default value of max errors is set to 5.|
-|`dfs.disk.balancer.block.tolerance.percent`|	The tolerance percent sepcifies when we have reached a good enough value for any copy step. For example, if you specify 10% then getting close to 10% of the target value is good enough.|
+|`dfs.disk.balancer.enabled`| This parameter controls whether diskbalancer is enabled for a cluster. If this is not enabled, any execute command will be rejected by the datanode. The default value is false.|
+|`dfs.disk.balancer.max.disk.throughputInMBperSec` | This controls the maximum disk bandwidth consumed by diskbalancer while copying data. If a value like 10MB is specified, then diskbalancer on average will only copy 10MB/s. The default value is 10MB/s.|
+|`dfs.disk.balancer.max.disk.errors`| Sets the maximum number of errors we can ignore for a specific move between two disks before it is abandoned. For example, if a plan has 3 pairs of disks to copy between, and the first disk set encounters more than 5 errors, then we abandon the first copy and start the second copy in the plan. The default value of max errors is set to 5.|
+|`dfs.disk.balancer.block.tolerance.percent`| The tolerance percent specifies when we have reached a good-enough value for any copy step. For example, if you specify 10%, then getting to within 10% of the target value is good enough.|
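
For reference, a minimal hdfs-site.xml sketch exercising the settings above.
The values shown are the documented defaults, except dfs.disk.balancer.enabled,
which is switched on here, and the tolerance percent, which is illustrative
since the table gives no default:

  <property>
    <name>dfs.disk.balancer.enabled</name>
    <!-- Default is false; execute commands are rejected while disabled. -->
    <value>true</value>
  </property>
  <property>
    <name>dfs.disk.balancer.max.disk.throughputInMBperSec</name>
    <value>10</value> <!-- documented default: 10 MB/s -->
  </property>
  <property>
    <name>dfs.disk.balancer.max.disk.errors</name>
    <value>5</value> <!-- documented default: 5 -->
  </property>
  <property>
    <name>dfs.disk.balancer.block.tolerance.percent</name>
    <value>10</value> <!-- illustrative; per the 10% example above -->
  </property>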

