Posted to common-commits@hadoop.apache.org by ro...@apache.org on 2019/03/12 16:05:36 UTC

[hadoop] branch trunk updated: YARN-9336. JobHistoryServer leaks CLOSE_WAIT tcp connections when using LogAggregationIndexedFileController. Contributed by Tarun Parimi.

This is an automated email from the ASF dual-hosted git repository.

rohithsharmaks pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new c24af4b  YARN-9336. JobHistoryServer leaks CLOSE_WAIT tcp connections when using LogAggregationIndexedFileController. Contributed by Tarun Parimi.
c24af4b is described below

commit c24af4b0d6fc32938b076161b5a8c86d38e3e0a1
Author: Rohith Sharma K S <ro...@apache.org>
AuthorDate: Tue Mar 12 20:57:27 2019 +0530

    YARN-9336. JobHistoryServer leaks CLOSE_WAIT tcp connections when using LogAggregationIndexedFileController. Contributed by Tarun Parimi.
---
 .../ifile/IndexedFileAggregatedLogsBlock.java      | 64 +++++++++++-----------
 1 file changed, 32 insertions(+), 32 deletions(-)
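
In short, the web UI opened an FSDataInputStream per aggregated log file but never closed it, so the underlying TCP connection (typically to an HDFS DataNode) lingered in CLOSE_WAIT. The patch below wraps the stream in try-with-resources so it is closed on every exit path. A minimal sketch of that pattern, with class, method, and path names that are illustrative rather than taken from the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class CloseWaitFixSketch {
      /** Reads one byte from an aggregated log file; logPath is hypothetical. */
      public static int readFirstByte(Configuration conf, Path logPath)
          throws IOException {
        FileContext fileContext =
            FileContext.getFileContext(logPath.toUri(), conf);
        // try-with-resources guarantees fsin.close() runs whether the read
        // succeeds or throws, releasing the underlying TCP connection that
        // would otherwise sit in CLOSE_WAIT.
        try (FSDataInputStream fsin = fileContext.open(logPath)) {
          return fsin.read();
        }
      }
    }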

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
index eb9634b..c49d372 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
@@ -202,38 +202,38 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock {
     Decompressor decompressor = compressName.getDecompressor();
     FileContext fileContext = FileContext.getFileContext(
         thisNodeFile.getPath().toUri(), conf);
-    FSDataInputStream fsin = fileContext.open(thisNodeFile.getPath());
-    int bufferSize = 65536;
-    for (IndexedFileLogMeta candidate : candidates) {
-      if (candidate.getLastModifiedTime() < startTime
-          || candidate.getLastModifiedTime() > endTime) {
-        continue;
-      }
-      byte[] cbuf = new byte[bufferSize];
-      InputStream in = null;
-      try {
-        in = compressName.createDecompressionStream(
-            new BoundedRangeFileInputStream(fsin, candidate.getStartIndex(),
-                candidate.getFileCompressedSize()), decompressor,
-            LogAggregationIndexedFileController.getFSInputBufferSize(conf));
-        long logLength = candidate.getFileSize();
-        html.pre().__("\n\n").__();
-        html.p().__("Log Type: " + candidate.getFileName()).__();
-        html.p().__(
-            "Log Upload Time: " + Times.format(candidate.getLastModifiedTime()))
-            .__();
-        html.p().__("Log Length: " + Long.toString(logLength)).__();
-
-        long[] range = checkParseRange(html, start, end, startTime, endTime,
-            logLength, candidate.getFileName());
-        processContainerLog(html, range, in, bufferSize, cbuf);
-
-        foundLog = true;
-      } catch (Exception ex) {
-        LOG.error("Error getting logs for " + logEntity, ex);
-        continue;
-      } finally {
-        IOUtils.closeQuietly(in);
+    try (FSDataInputStream fsin = fileContext.open(thisNodeFile.getPath())) {
+      int bufferSize = 65536;
+      for (IndexedFileLogMeta candidate : candidates) {
+        if (candidate.getLastModifiedTime() < startTime
+            || candidate.getLastModifiedTime() > endTime) {
+          continue;
+        }
+        byte[] cbuf = new byte[bufferSize];
+        InputStream in = null;
+        try {
+          in = compressName.createDecompressionStream(
+              new BoundedRangeFileInputStream(fsin, candidate.getStartIndex(),
+                  candidate.getFileCompressedSize()), decompressor,
+              LogAggregationIndexedFileController.getFSInputBufferSize(conf));
+          long logLength = candidate.getFileSize();
+          html.pre().__("\n\n").__();
+          html.p().__("Log Type: " + candidate.getFileName()).__();
+          html.p().__("Log Upload Time: " +
+              Times.format(candidate.getLastModifiedTime())).__();
+          html.p().__("Log Length: " + Long.toString(logLength)).__();
+
+          long[] range = checkParseRange(html, start, end, startTime, endTime,
+              logLength, candidate.getFileName());
+          processContainerLog(html, range, in, bufferSize, cbuf);
+
+          foundLog = true;
+        } catch (Exception ex) {
+          LOG.error("Error getting logs for " + logEntity, ex);
+          continue;
+        } finally {
+          IOUtils.closeQuietly(in);
+        }
       }
     }
     return foundLog;
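
A note on the shape of the fix: only the outer FSDataInputStream moves into try-with-resources, while the per-candidate decompression stream is still re-created on each loop iteration and closed through IOUtils.closeQuietly(in) in the inner finally block. To confirm the leak is gone, one would typically watch the JobHistoryServer process with a tool such as lsof or netstat and verify that the count of CLOSE_WAIT sockets no longer grows as aggregated logs are viewed.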

