Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/05/21 18:01:46 UTC
[1/7] hadoop git commit: YARN-3654. ContainerLogsPage web UI should not have meta-refresh. Contributed by Xuan Gong
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7240 8966d4217 -> 2b6bcfdaf
YARN-3654. ContainerLogsPage web UI should not have meta-refresh. Contributed by Xuan Gong
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6329bd00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6329bd00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6329bd00
Branch: refs/heads/HDFS-7240
Commit: 6329bd00fa1f17cc9555efa496ea7607ad93e0ce
Parents: 8966d42
Author: Jian He <ji...@apache.org>
Authored: Wed May 20 17:20:21 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Wed May 20 17:20:21 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../nodemanager/webapp/ContainerLogsPage.java | 3 -
.../server/nodemanager/webapp/NMController.java | 40 +------
.../nodemanager/webapp/NMWebAppFilter.java | 118 +++++++++++++++++++
.../server/nodemanager/webapp/WebServer.java | 7 +-
5 files changed, 128 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dfbc06e..3cba027 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -426,6 +426,9 @@ Release 2.8.0 - UNRELEASED
YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted.
(Varun Vasudev via jianhe)
+ YARN-3654. ContainerLogsPage web UI should not have meta-refresh. (Xuan Gong
+ via jianhe)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
index 48e0c87..2fd6b2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
@@ -59,9 +59,6 @@ public class ContainerLogsPage extends NMView {
if (redirectUrl.equals("false")) {
set(TITLE, join("Failed redirect for ", $(CONTAINER_ID)));
//Error getting redirect url. Fall through.
- } else {
- set(TITLE, join("Redirecting to log server for ", $(CONTAINER_ID)));
- html.meta_http("refresh", "1; url=" + redirectUrl);
}
}
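Note, for context: the meta_http call removed above emitted roughly a client-side <meta http-equiv="refresh" content="1; url=..."> tag into the page and left the redirect to the browser. The commit replaces that mechanism with a server-side redirect (HTTP 307 plus a Location header) in the new NMWebAppFilter further below.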
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
index 097532f..5be5b35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java
@@ -20,29 +20,16 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.nodemanager.Context;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
-import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import com.google.inject.Inject;
public class NMController extends Controller implements YarnWebParams {
-
- private Context nmContext;
- private Configuration nmConf;
@Inject
- public NMController(Configuration nmConf, RequestContext requestContext,
- Context nmContext) {
+ public NMController(RequestContext requestContext) {
super(requestContext);
- this.nmContext = nmContext;
- this.nmConf = nmConf;
}
@Override
@@ -80,31 +67,6 @@ public class NMController extends Controller implements YarnWebParams {
}
public void logs() {
- String containerIdStr = $(CONTAINER_ID);
- ContainerId containerId = null;
- try {
- containerId = ConverterUtils.toContainerId(containerIdStr);
- } catch (IllegalArgumentException e) {
- render(ContainerLogsPage.class);
- return;
- }
- ApplicationId appId =
- containerId.getApplicationAttemptId().getApplicationId();
- Application app = nmContext.getApplications().get(appId);
- if (app == null
- && nmConf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
- YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
- String logServerUrl = nmConf.get(YarnConfiguration.YARN_LOG_SERVER_URL);
- String redirectUrl = null;
- if (logServerUrl == null || logServerUrl.isEmpty()) {
- redirectUrl = "false";
- } else {
- redirectUrl =
- url(logServerUrl, nmContext.getNodeId().toString(), containerIdStr,
- containerIdStr, $(APP_OWNER));
- }
- set(ContainerLogsPage.REDIRECT_URL, redirectUrl);
- }
render(ContainerLogsPage.class);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
new file mode 100644
index 0000000..63fe6ea
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebAppFilter.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import javax.servlet.FilterChain;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HtmlQuoting;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
+import com.google.inject.Injector;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+
+@Singleton
+public class NMWebAppFilter extends GuiceContainer{
+
+ private Injector injector;
+ private Context nmContext;
+
+ private static final long serialVersionUID = 1L;
+
+ @Inject
+ public NMWebAppFilter(Injector injector, Context nmContext) {
+ super(injector);
+ this.injector = injector;
+ this.nmContext = nmContext;
+ }
+
+ @Override
+ public void doFilter(HttpServletRequest request,
+ HttpServletResponse response, FilterChain chain) throws IOException,
+ ServletException {
+ String uri = HtmlQuoting.quoteHtmlChars(request.getRequestURI());
+ String redirectPath = containerLogPageRedirectPath(uri);
+ if (redirectPath != null) {
+ String redirectMsg =
+ "Redirecting to log server" + " : " + redirectPath;
+ PrintWriter out = response.getWriter();
+ out.println(redirectMsg);
+ response.setHeader("Location", redirectPath);
+ response.setStatus(HttpServletResponse.SC_TEMPORARY_REDIRECT);
+ return;
+ }
+ super.doFilter(request, response, chain);
+ }
+
+ private String containerLogPageRedirectPath(String uri) {
+ String redirectPath = null;
+ if (!uri.contains("/ws/v1/node") && uri.contains("/containerlogs")) {
+ String[] parts = uri.split("/");
+ String containerIdStr = parts[3];
+ String appOwner = parts[4];
+ if (containerIdStr != null && !containerIdStr.isEmpty()) {
+ ContainerId containerId = null;
+ try {
+ containerId = ContainerId.fromString(containerIdStr);
+ } catch (IllegalArgumentException ex) {
+ return redirectPath;
+ }
+ ApplicationId appId =
+ containerId.getApplicationAttemptId().getApplicationId();
+ Application app = nmContext.getApplications().get(appId);
+ Configuration nmConf = nmContext.getLocalDirsHandler().getConfig();
+ if (app == null
+ && nmConf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
+ YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
+ String logServerUrl =
+ nmConf.get(YarnConfiguration.YARN_LOG_SERVER_URL);
+ if (logServerUrl != null && !logServerUrl.isEmpty()) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(logServerUrl);
+ sb.append("/");
+ sb.append(nmContext.getNodeId().toString());
+ sb.append("/");
+ sb.append(containerIdStr);
+ sb.append("/");
+ sb.append(containerIdStr);
+ sb.append("/");
+ sb.append(appOwner);
+ redirectPath = sb.toString();
+ } else {
+ injector.getInstance(RequestContext.class).set(
+ ContainerLogsPage.REDIRECT_URL, "false");
+ }
+ }
+ }
+ }
+ return redirectPath;
+ }
+}
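With this filter in place, requesting the container-logs URL on a NodeManager that no longer hosts the application should return an HTTP 307 whose Location header points at the log server, instead of the old meta-refresh page. A minimal probe, not part of the commit, assuming a hypothetical NodeManager at nm-host:8042 and a made-up container id and app owner:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class RedirectProbe {
      public static void main(String[] args) throws Exception {
        // Hypothetical NodeManager address, container id, and app owner.
        URL url = new URL("http://nm-host:8042/node/containerlogs/"
            + "container_1432000000000_0001_01_000001/testuser");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setInstanceFollowRedirects(false); // inspect the redirect itself
        System.out.println(conn.getResponseCode());          // expect 307
        System.out.println(conn.getHeaderField("Location")); // log server path
      }
    }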
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6329bd00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
index 8c0d149..0e0c1e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
@@ -22,7 +22,6 @@ import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -36,6 +35,8 @@ import org.apache.hadoop.yarn.webapp.WebApps;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+
public class WebServer extends AbstractService {
private static final Log LOG = LogFactory.getLog(WebServer.class);
@@ -129,5 +130,9 @@ public class WebServer extends AbstractService {
route("/errors-and-warnings", NMController.class, "errorsAndWarnings");
}
+ @Override
+ protected Class<? extends GuiceContainer> getWebAppFilterClass() {
+ return NMWebAppFilter.class;
+ }
}
}
[4/7] hadoop git commit: HADOOP-10366. Add whitespaces between classes for values in core-default.xml to fit better in browser. Contributed by kanaka kumar avvaru.
Posted by ji...@apache.org.
HADOOP-10366. Add whitespaces between classes for values in core-default.xml to fit better in browser. Contributed by kanaka kumar avvaru.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e4f1081
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e4f1081
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e4f1081
Branch: refs/heads/HDFS-7240
Commit: 0e4f1081c7a98e1c0c4f922f5e2afe467a0d763f
Parents: dc8434a
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu May 21 17:52:03 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu May 21 17:52:03 2015 +0900
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++++
.../hadoop-common/src/main/resources/core-default.xml | 4 ++--
.../hadoop-hdfs/src/site/markdown/TransparentEncryption.md | 2 +-
3 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 416b819..aff9368 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -593,6 +593,10 @@ Release 2.8.0 - UNRELEASED
HADOOP-11995. Make jetty version configurable from the maven command line.
(Sriharsha Devineni via wheat9)
+ HADOOP-10366. Add whitespaces between classes for values in
+ core-default.xml to fit better in browser.
+ (kanaka kumar avvaru via aajisaka)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 97e01a8..a1bc780 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -466,7 +466,7 @@ for ldap providers in the same way as above does.
<property>
<name>io.serializations</name>
- <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
+ <value>org.apache.hadoop.io.serializer.WritableSerialization, org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
<description>A list of serialization classes that can be used for
obtaining serializers and deserializers.</description>
</property>
@@ -1655,7 +1655,7 @@ for ldap providers in the same way as above does.
<property>
<name>hadoop.security.crypto.codec.classes.aes.ctr.nopadding</name>
- <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
+ <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
<description>
Comma-separated list of crypto codec implementations for AES/CTR/NoPadding.
The first implementation will be used if available, others are fallbacks.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4f1081/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index aa2acbd..05e4249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -126,7 +126,7 @@ The prefix for a given crypto codec, contains a comma-separated list of implemen
#### hadoop.security.crypto.codec.classes.aes.ctr.nopadding
-Default: `org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
+Default: `org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, org.apache.hadoop.crypto.JceAesCtrCryptoCodec`
Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. The first implementation will be used if available, others are fallbacks.
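A note on why the added spaces are safe, assuming consumers read these keys through Configuration's trimming accessors (which is the premise of this change): getTrimmedStrings() strips the whitespace around each comma-separated element, so the class names resolve identically either way. A minimal sketch:

    import org.apache.hadoop.conf.Configuration;

    public class TrimDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("io.serializations",
            "org.apache.hadoop.io.serializer.WritableSerialization, "
            + "org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization");
        // getTrimmedStrings strips the whitespace around each element.
        for (String cls : conf.getTrimmedStrings("io.serializations")) {
          System.out.println("[" + cls + "]"); // no leading/trailing spaces
        }
      }
    }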
[3/7] hadoop git commit: HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.
Posted by ji...@apache.org.
HDFS-4383. Document the lease limits. Contributed by Arshad Mohammad.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc8434ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc8434ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc8434ab
Branch: refs/heads/HDFS-7240
Commit: dc8434ab2b177ca9673bd8eecf7b185d4c4ffb31
Parents: fb6b38d
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu May 21 17:30:43 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu May 21 17:30:43 2015 +0900
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hdfs/server/common/HdfsServerConstants.java | 23 +++++++++++++++++---
2 files changed, 22 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8434ab/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5bcaddd..77d7369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -575,6 +575,8 @@ Release 2.8.0 - UNRELEASED
HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)
+ HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc8434ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index c664b01..26a7ab3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -42,10 +42,27 @@ import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private
public interface HdfsServerConstants {
int MIN_BLOCKS_FOR_WRITE = 1;
- //
- // Timeouts, constants
- //
+ /**
+ * For a HDFS client to write to a file, a lease is granted; During the lease
+ * period, no other client can write to the file. The writing client can
+ * periodically renew the lease. When the file is closed, the lease is
+ * revoked. The lease duration is bound by this soft limit and a
+ * {@link HdfsServerConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
+ * soft limit expires, the writer has sole write access to the file. If the
+ * soft limit expires and the client fails to close the file or renew the
+ * lease, another client can preempt the lease.
+ */
long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+ /**
+ * For a HDFS client to write to a file, a lease is granted; During the lease
+ * period, no other client can write to the file. The writing client can
+ * periodically renew the lease. When the file is closed, the lease is
+ * revoked. The lease duration is bound by a
+ * {@link HdfsServerConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
+ * limit. If after the hard limit expires and the client has failed to renew
+ * the lease, HDFS assumes that the client has quit and will automatically
+ * close the file on behalf of the writer, and recover the lease.
+ */
long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
// We need to limit the length and depth of a path in the filesystem.
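In human units, the limits documented above work out to one minute (soft) and one hour (hard). An illustrative calculation, not part of the patch:

    public class LeaseLimits {
      public static void main(String[] args) {
        long softLimitMs = 60 * 1000;        // LEASE_SOFTLIMIT_PERIOD
        long hardLimitMs = 60 * softLimitMs; // LEASE_HARDLIMIT_PERIOD
        System.out.println("soft limit: " + softLimitMs / 1000 + " s");    // 60 s
        System.out.println("hard limit: " + hardLimitMs / 60000 + " min"); // 60 min
      }
    }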
[7/7] hadoop git commit: HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. Contributed by Haohui Mai.
Posted by ji...@apache.org.
HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b6bcfda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b6bcfda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b6bcfda
Branch: refs/heads/HDFS-7240
Commit: 2b6bcfdafa91223a4116e3e9304579f5f91dccac
Parents: 0305316
Author: Haohui Mai <wh...@apache.org>
Authored: Thu May 21 08:05:10 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Thu May 21 08:08:28 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/FSDirWriteFileOp.java | 324 ++++++++++++++++++-
.../hdfs/server/namenode/FSDirectory.java | 91 ------
.../hdfs/server/namenode/FSEditLogLoader.java | 15 +-
.../hdfs/server/namenode/FSNamesystem.java | 280 +++-------------
5 files changed, 371 insertions(+), 342 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77d7369..9cfad7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -577,6 +577,9 @@ Release 2.8.0 - UNRELEASED
HDFS-4383. Document the lease limits. (Arshad Mohammad via aajisaka)
+ HDFS-8421. Move startFile() and related functions into FSDirWriteFileOp.
+ (wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 1ff0899..307bd59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,11 +18,27 @@
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
+import org.apache.commons.io.Charsets;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -34,15 +50,22 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ChunkedArrayList;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
+import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
+import static org.apache.hadoop.util.Time.now;
+
class FSDirWriteFileOp {
private FSDirWriteFileOp() {}
static boolean unprotectedRemoveBlock(
@@ -278,6 +301,210 @@ class FSDirWriteFileOp {
}
/**
+ * Create a new file or overwrite an existing file<br>
+ *
+ * Once the file is create the client then allocates a new block with the next
+ * call using {@link ClientProtocol#addBlock}.
+ * <p>
+ * For description of parameters and exceptions thrown see
+ * {@link ClientProtocol#create}
+ */
+ static HdfsFileStatus startFile(
+ FSNamesystem fsn, FSPermissionChecker pc, String src,
+ PermissionStatus permissions, String holder, String clientMachine,
+ EnumSet<CreateFlag> flag, boolean createParent,
+ short replication, long blockSize,
+ EncryptionKeyInfo ezInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
+ boolean logRetryEntry)
+ throws IOException {
+ assert fsn.hasWriteLock();
+
+ boolean create = flag.contains(CreateFlag.CREATE);
+ boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
+ boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
+
+ CipherSuite suite = null;
+ CryptoProtocolVersion version = null;
+ KeyProviderCryptoExtension.EncryptedKeyVersion edek = null;
+
+ if (ezInfo != null) {
+ edek = ezInfo.edek;
+ suite = ezInfo.suite;
+ version = ezInfo.protocolVersion;
+ }
+
+ boolean isRawPath = FSDirectory.isReservedRawName(src);
+ FSDirectory fsd = fsn.getFSDirectory();
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+ src = fsd.resolvePath(pc, src, pathComponents);
+ INodesInPath iip = fsd.getINodesInPath4Write(src);
+
+ // Verify that the destination does not exist as a directory already.
+ final INode inode = iip.getLastINode();
+ if (inode != null && inode.isDirectory()) {
+ throw new FileAlreadyExistsException(src +
+ " already exists as a directory");
+ }
+
+ final INodeFile myFile = INodeFile.valueOf(inode, src, true);
+ if (fsd.isPermissionEnabled()) {
+ if (overwrite && myFile != null) {
+ fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+ }
+ /*
+ * To overwrite existing file, need to check 'w' permission
+ * of parent (equals to ancestor in this case)
+ */
+ fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
+ }
+
+ if (!createParent) {
+ fsd.verifyParentDir(iip, src);
+ }
+
+ if (myFile == null && !create) {
+ throw new FileNotFoundException("Can't overwrite non-existent " +
+ src + " for client " + clientMachine);
+ }
+
+ FileEncryptionInfo feInfo = null;
+
+ final EncryptionZone zone = fsd.getEZForPath(iip);
+ if (zone != null) {
+ // The path is now within an EZ, but we're missing encryption parameters
+ if (suite == null || edek == null) {
+ throw new RetryStartFileException();
+ }
+ // Path is within an EZ and we have provided encryption parameters.
+ // Make sure that the generated EDEK matches the settings of the EZ.
+ final String ezKeyName = zone.getKeyName();
+ if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
+ throw new RetryStartFileException();
+ }
+ feInfo = new FileEncryptionInfo(suite, version,
+ edek.getEncryptedKeyVersion().getMaterial(),
+ edek.getEncryptedKeyIv(),
+ ezKeyName, edek.getEncryptionKeyVersionName());
+ }
+
+ if (myFile != null) {
+ if (overwrite) {
+ List<INode> toRemoveINodes = new ChunkedArrayList<>();
+ List<Long> toRemoveUCFiles = new ChunkedArrayList<>();
+ long ret = FSDirDeleteOp.delete(fsd, iip, toRemoveBlocks,
+ toRemoveINodes, toRemoveUCFiles, now());
+ if (ret >= 0) {
+ iip = INodesInPath.replace(iip, iip.length() - 1, null);
+ FSDirDeleteOp.incrDeletedFileCount(ret);
+ fsn.removeLeasesAndINodes(toRemoveUCFiles, toRemoveINodes, true);
+ }
+ } else {
+ // If lease soft limit time is expired, recover the lease
+ fsn.recoverLeaseInternal(FSNamesystem.RecoverLeaseOp.CREATE_FILE, iip,
+ src, holder, clientMachine, false);
+ throw new FileAlreadyExistsException(src + " for client " +
+ clientMachine + " already exists");
+ }
+ }
+ fsn.checkFsObjectLimit();
+ INodeFile newNode = null;
+ Map.Entry<INodesInPath, String> parent = FSDirMkdirOp
+ .createAncestorDirectories(fsd, iip, permissions);
+ if (parent != null) {
+ iip = addFile(fsd, parent.getKey(), parent.getValue(), permissions,
+ replication, blockSize, holder, clientMachine);
+ newNode = iip != null ? iip.getLastINode().asFile() : null;
+ }
+ if (newNode == null) {
+ throw new IOException("Unable to add " + src + " to namespace");
+ }
+ fsn.leaseManager.addLease(
+ newNode.getFileUnderConstructionFeature().getClientName(),
+ newNode.getId());
+ if (feInfo != null) {
+ fsd.setFileEncryptionInfo(src, feInfo);
+ newNode = fsd.getInode(newNode.getId()).asFile();
+ }
+ setNewINodeStoragePolicy(fsn.getBlockManager(), newNode, iip,
+ isLazyPersist);
+ fsd.getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
+ if (NameNode.stateChangeLog.isDebugEnabled()) {
+ NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
+ src + " inode " + newNode.getId() + " " + holder);
+ }
+ return FSDirStatAndListingOp.getFileInfo(fsd, src, false, isRawPath, true);
+ }
+
+ static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
+ FSPermissionChecker pc, String src,
+ CryptoProtocolVersion[] supportedVersions)
+ throws IOException {
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+ FSDirectory fsd = fsn.getFSDirectory();
+ src = fsd.resolvePath(pc, src, pathComponents);
+ INodesInPath iip = fsd.getINodesInPath4Write(src);
+ // Nothing to do if the path is not within an EZ
+ final EncryptionZone zone = fsd.getEZForPath(iip);
+ if (zone == null) {
+ return null;
+ }
+ CryptoProtocolVersion protocolVersion = fsn.chooseProtocolVersion(
+ zone, supportedVersions);
+ CipherSuite suite = zone.getSuite();
+ String ezKeyName = zone.getKeyName();
+
+ Preconditions.checkNotNull(protocolVersion);
+ Preconditions.checkNotNull(suite);
+ Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
+ "Chose an UNKNOWN CipherSuite!");
+ Preconditions.checkNotNull(ezKeyName);
+ return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName);
+ }
+
+ static INodeFile addFileForEditLog(
+ FSDirectory fsd, long id, INodesInPath existing, byte[] localName,
+ PermissionStatus permissions, List<AclEntry> aclEntries,
+ List<XAttr> xAttrs, short replication, long modificationTime, long atime,
+ long preferredBlockSize, boolean underConstruction, String clientName,
+ String clientMachine, byte storagePolicyId) {
+ final INodeFile newNode;
+ assert fsd.hasWriteLock();
+ if (underConstruction) {
+ newNode = newINodeFile(id, permissions, modificationTime,
+ modificationTime, replication,
+ preferredBlockSize,
+ storagePolicyId);
+ newNode.toUnderConstruction(clientName, clientMachine);
+ } else {
+ newNode = newINodeFile(id, permissions, modificationTime,
+ atime, replication,
+ preferredBlockSize,
+ storagePolicyId);
+ }
+
+ newNode.setLocalName(localName);
+ try {
+ INodesInPath iip = fsd.addINode(existing, newNode);
+ if (iip != null) {
+ if (aclEntries != null) {
+ AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
+ }
+ if (xAttrs != null) {
+ XAttrStorage.updateINodeXAttrs(newNode, xAttrs, CURRENT_STATE_ID);
+ }
+ return newNode;
+ }
+ } catch (IOException e) {
+ if(NameNode.stateChangeLog.isDebugEnabled()) {
+ NameNode.stateChangeLog.debug(
+ "DIR* FSDirectory.unprotectedAddFile: exception when add "
+ + existing.getPath() + " to the file system", e);
+ }
+ }
+ return null;
+ }
+
+ /**
* Add a block to the file. Returns a reference to the added block.
*/
private static BlockInfoContiguous addBlock(
@@ -314,6 +541,41 @@ class FSDirWriteFileOp {
}
}
+ /**
+ * Add the given filename to the fs.
+ * @return the new INodesInPath instance that contains the new INode
+ */
+ private static INodesInPath addFile(
+ FSDirectory fsd, INodesInPath existing, String localName,
+ PermissionStatus permissions, short replication, long preferredBlockSize,
+ String clientName, String clientMachine)
+ throws IOException {
+
+ long modTime = now();
+ INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
+ modTime, modTime, replication, preferredBlockSize);
+ newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
+ newNode.toUnderConstruction(clientName, clientMachine);
+
+ INodesInPath newiip;
+ fsd.writeLock();
+ try {
+ newiip = fsd.addINode(existing, newNode);
+ } finally {
+ fsd.writeUnlock();
+ }
+ if (newiip == null) {
+ NameNode.stateChangeLog.info("DIR* addFile: failed to add " +
+ existing.getPath() + "/" + localName);
+ return null;
+ }
+
+ if(NameNode.stateChangeLog.isDebugEnabled()) {
+ NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
+ }
+ return newiip;
+ }
+
private static FileState analyzeFileState(
FSNamesystem fsn, String src, long fileId, String clientName,
ExtendedBlock previous, LocatedBlock[] onRetryBlock)
@@ -345,8 +607,7 @@ class FSDirWriteFileOp {
src = iip.getPath();
}
}
- final INodeFile file = fsn.checkLease(src, clientName,
- inode, fileId);
+ final INodeFile file = fsn.checkLease(src, clientName, inode, fileId);
BlockInfoContiguous lastBlockInFile = file.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
@@ -497,6 +758,20 @@ class FSDirWriteFileOp {
return true;
}
+ private static INodeFile newINodeFile(
+ long id, PermissionStatus permissions, long mtime, long atime,
+ short replication, long preferredBlockSize, byte storagePolicyId) {
+ return new INodeFile(id, null, permissions, mtime, atime,
+ BlockInfoContiguous.EMPTY_ARRAY, replication, preferredBlockSize,
+ storagePolicyId);
+ }
+
+ private static INodeFile newINodeFile(long id, PermissionStatus permissions,
+ long mtime, long atime, short replication, long preferredBlockSize) {
+ return newINodeFile(id, permissions, mtime, atime, replication,
+ preferredBlockSize, (byte)0);
+ }
+
/**
* Persist the new block (the last block of the given file).
*/
@@ -533,6 +808,36 @@ class FSDirWriteFileOp {
DatanodeStorageInfo.incrementBlocksScheduled(targets);
}
+ private static void setNewINodeStoragePolicy(BlockManager bm, INodeFile
+ inode, INodesInPath iip, boolean isLazyPersist)
+ throws IOException {
+
+ if (isLazyPersist) {
+ BlockStoragePolicy lpPolicy =
+ bm.getStoragePolicy("LAZY_PERSIST");
+
+ // Set LAZY_PERSIST storage policy if the flag was passed to
+ // CreateFile.
+ if (lpPolicy == null) {
+ throw new HadoopIllegalArgumentException(
+ "The LAZY_PERSIST storage policy has been disabled " +
+ "by the administrator.");
+ }
+ inode.setStoragePolicyID(lpPolicy.getId(),
+ iip.getLatestSnapshotId());
+ } else {
+ BlockStoragePolicy effectivePolicy =
+ bm.getStoragePolicy(inode.getStoragePolicyID());
+
+ if (effectivePolicy != null &&
+ effectivePolicy.isCopyOnCreateFile()) {
+ // Copy effective policy from ancestor directory to current file.
+ inode.setStoragePolicyID(effectivePolicy.getId(),
+ iip.getLatestSnapshotId());
+ }
+ }
+ }
+
private static class FileState {
final INodeFile inode;
final String path;
@@ -560,4 +865,19 @@ class FSDirWriteFileOp {
this.clientMachine = clientMachine;
}
}
+
+ static class EncryptionKeyInfo {
+ final CryptoProtocolVersion protocolVersion;
+ final CipherSuite suite;
+ final String ezKeyName;
+ KeyProviderCryptoExtension.EncryptedKeyVersion edek;
+
+ EncryptionKeyInfo(
+ CryptoProtocolVersion protocolVersion, CipherSuite suite,
+ String ezKeyName) {
+ this.protocolVersion = protocolVersion;
+ this.suite = suite;
+ this.ezKeyName = ezKeyName;
+ }
+ }
}
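For orientation, the refactored startFile() is still reached through the same public client path: FileSystem#create goes over ClientProtocol#create to the NameNode, which after this commit delegates to FSDirWriteFileOp.startFile(). A minimal client-side sketch, not from the commit, with a placeholder fs.defaultFS:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CreateFileDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode-host:8020"); // hypothetical
        try (FileSystem fs = FileSystem.get(conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/demo.txt"))) {
          // create() drives ClientProtocol#create, which the NameNode now
          // services via FSDirWriteFileOp.startFile().
          out.writeUTF("hello");
        }
      }
    }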
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c2ed956..8fdd2d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -21,13 +21,11 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.io.Charsets;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@@ -42,7 +40,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -86,7 +83,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
-import static org.apache.hadoop.util.Time.now;
/**
* Both FSDirectory and FSNamesystem manage the state of the namespace.
@@ -388,93 +384,6 @@ public class FSDirectory implements Closeable {
skipQuotaCheck = true;
}
- private static INodeFile newINodeFile(long id, PermissionStatus permissions,
- long mtime, long atime, short replication, long preferredBlockSize) {
- return newINodeFile(id, permissions, mtime, atime, replication,
- preferredBlockSize, (byte)0);
- }
-
- private static INodeFile newINodeFile(long id, PermissionStatus permissions,
- long mtime, long atime, short replication, long preferredBlockSize,
- byte storagePolicyId) {
- return new INodeFile(id, null, permissions, mtime, atime,
- BlockInfoContiguous.EMPTY_ARRAY, replication, preferredBlockSize,
- storagePolicyId);
- }
-
- /**
- * Add the given filename to the fs.
- * @return the new INodesInPath instance that contains the new INode
- */
- INodesInPath addFile(INodesInPath existing, String localName, PermissionStatus
- permissions, short replication, long preferredBlockSize,
- String clientName, String clientMachine)
- throws FileAlreadyExistsException, QuotaExceededException,
- UnresolvedLinkException, SnapshotAccessControlException, AclException {
-
- long modTime = now();
- INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime,
- modTime, replication, preferredBlockSize);
- newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
- newNode.toUnderConstruction(clientName, clientMachine);
-
- INodesInPath newiip;
- writeLock();
- try {
- newiip = addINode(existing, newNode);
- } finally {
- writeUnlock();
- }
- if (newiip == null) {
- NameNode.stateChangeLog.info("DIR* addFile: failed to add " +
- existing.getPath() + "/" + localName);
- return null;
- }
-
- if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
- }
- return newiip;
- }
-
- INodeFile addFileForEditLog(long id, INodesInPath existing, byte[] localName,
- PermissionStatus permissions, List<AclEntry> aclEntries,
- List<XAttr> xAttrs, short replication, long modificationTime, long atime,
- long preferredBlockSize, boolean underConstruction, String clientName,
- String clientMachine, byte storagePolicyId) {
- final INodeFile newNode;
- assert hasWriteLock();
- if (underConstruction) {
- newNode = newINodeFile(id, permissions, modificationTime,
- modificationTime, replication, preferredBlockSize, storagePolicyId);
- newNode.toUnderConstruction(clientName, clientMachine);
- } else {
- newNode = newINodeFile(id, permissions, modificationTime, atime,
- replication, preferredBlockSize, storagePolicyId);
- }
-
- newNode.setLocalName(localName);
- try {
- INodesInPath iip = addINode(existing, newNode);
- if (iip != null) {
- if (aclEntries != null) {
- AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
- }
- if (xAttrs != null) {
- XAttrStorage.updateINodeXAttrs(newNode, xAttrs, CURRENT_STATE_ID);
- }
- return newNode;
- }
- } catch (IOException e) {
- if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug(
- "DIR* FSDirectory.unprotectedAddFile: exception when add "
- + existing.getPath() + " to the file system", e);
- }
- }
- return null;
- }
-
/**
* This is a wrapper for resolvePath(). If the path passed
* is prefixed with /.reserved/raw, then it checks to ensure that the caller
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index dec1298..476ff36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -364,15 +364,12 @@ public class FSEditLogLoader {
// add to the file tree
inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId);
- newFile = fsDir.addFileForEditLog(inodeId, iip.getExistingINodes(),
- iip.getLastLocalName(),
- addCloseOp.permissions,
- addCloseOp.aclEntries,
- addCloseOp.xAttrs, replication,
- addCloseOp.mtime, addCloseOp.atime,
- addCloseOp.blockSize, true,
- addCloseOp.clientName,
- addCloseOp.clientMachine,
+ newFile = FSDirWriteFileOp.addFileForEditLog(fsDir, inodeId,
+ iip.getExistingINodes(), iip.getLastLocalName(),
+ addCloseOp.permissions, addCloseOp.aclEntries,
+ addCloseOp.xAttrs, replication, addCloseOp.mtime,
+ addCloseOp.atime, addCloseOp.blockSize, true,
+ addCloseOp.clientName, addCloseOp.clientMachine,
addCloseOp.storagePolicyId);
iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6bcfda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7e5b981..bfd6eba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -151,7 +151,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
@@ -275,7 +274,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;
-import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.ReflectionUtils;
@@ -2279,8 +2277,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @return chosen protocol version
* @throws IOException
*/
- private CryptoProtocolVersion chooseProtocolVersion(EncryptionZone zone,
- CryptoProtocolVersion[] supportedVersions)
+ CryptoProtocolVersion chooseProtocolVersion(
+ EncryptionZone zone, CryptoProtocolVersion[] supportedVersions)
throws UnknownCryptoProtocolVersionException, UnresolvedLinkException,
SnapshotAccessControlException {
Preconditions.checkNotNull(zone);
@@ -2342,11 +2340,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
String holder, String clientMachine, EnumSet<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
- throws AccessControlException, SafeModeException,
- FileAlreadyExistsException, UnresolvedLinkException,
- FileNotFoundException, ParentNotDirectoryException, IOException {
+ throws IOException {
- HdfsFileStatus status = null;
+ HdfsFileStatus status;
try {
status = startFileInt(src, permissions, holder, clientMachine, flag,
createParent, replication, blockSize, supportedVersions,
@@ -2355,54 +2351,42 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
logAuditEvent(false, "create", src);
throw e;
}
+ logAuditEvent(true, "create", src, null, status);
return status;
}
- private HdfsFileStatus startFileInt(final String srcArg,
+ private HdfsFileStatus startFileInt(final String src,
PermissionStatus permissions, String holder, String clientMachine,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, CryptoProtocolVersion[] supportedVersions,
boolean logRetryCache)
- throws AccessControlException, SafeModeException,
- FileAlreadyExistsException, UnresolvedLinkException,
- FileNotFoundException, ParentNotDirectoryException, IOException {
- String src = srcArg;
+ throws IOException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
- builder.append("DIR* NameSystem.startFile: src=" + src
- + ", holder=" + holder
- + ", clientMachine=" + clientMachine
- + ", createParent=" + createParent
- + ", replication=" + replication
- + ", createFlag=" + flag.toString()
- + ", blockSize=" + blockSize);
- builder.append(", supportedVersions=");
- if (supportedVersions != null) {
- builder.append(Arrays.toString(supportedVersions));
- } else {
- builder.append("null");
- }
+ builder.append("DIR* NameSystem.startFile: src=").append(src)
+ .append(", holder=").append(holder)
+ .append(", clientMachine=").append(clientMachine)
+ .append(", createParent=").append(createParent)
+ .append(", replication=").append(replication)
+ .append(", createFlag=").append(flag.toString())
+ .append(", blockSize=").append(blockSize)
+ .append(", supportedVersions=")
+ .append(supportedVersions == null ? null : Arrays.toString
+ (supportedVersions));
NameNode.stateChangeLog.debug(builder.toString());
}
if (!DFSUtil.isValidName(src)) {
throw new InvalidPathException(src);
}
blockManager.verifyReplication(src, replication, clientMachine);
-
- boolean skipSync = false;
- HdfsFileStatus stat = null;
- FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
if (blockSize < minBlockSize) {
throw new IOException("Specified block size is less than configured" +
" minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
+ "): " + blockSize + " < " + minBlockSize);
}
- byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
- boolean create = flag.contains(CreateFlag.CREATE);
- boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
- boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
+ FSPermissionChecker pc = getPermissionChecker();
waitForLoadingFSImage();
/**
@@ -2417,246 +2401,62 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* special RetryStartFileException to ask the DFSClient to try the create
* again later.
*/
- CryptoProtocolVersion protocolVersion = null;
- CipherSuite suite = null;
- String ezKeyName = null;
- EncryptedKeyVersion edek = null;
+ FSDirWriteFileOp.EncryptionKeyInfo ezInfo = null;
if (provider != null) {
readLock();
try {
- src = dir.resolvePath(pc, src, pathComponents);
- INodesInPath iip = dir.getINodesInPath4Write(src);
- // Nothing to do if the path is not within an EZ
- final EncryptionZone zone = dir.getEZForPath(iip);
- if (zone != null) {
- protocolVersion = chooseProtocolVersion(zone, supportedVersions);
- suite = zone.getSuite();
- ezKeyName = zone.getKeyName();
-
- Preconditions.checkNotNull(protocolVersion);
- Preconditions.checkNotNull(suite);
- Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
- "Chose an UNKNOWN CipherSuite!");
- Preconditions.checkNotNull(ezKeyName);
- }
+ checkOperation(OperationCategory.READ);
+ ezInfo = FSDirWriteFileOp
+ .getEncryptionKeyInfo(this, pc, src, supportedVersions);
} finally {
readUnlock();
}
- Preconditions.checkState(
- (suite == null && ezKeyName == null) ||
- (suite != null && ezKeyName != null),
- "Both suite and ezKeyName should both be null or not null");
-
// Generate EDEK if necessary while not holding the lock
- edek = generateEncryptedDataEncryptionKey(ezKeyName);
+ if (ezInfo != null) {
+ ezInfo.edek = generateEncryptedDataEncryptionKey(ezInfo.ezKeyName);
+ }
EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
}
- // Proceed with the create, using the computed cipher suite and
+ boolean skipSync = false;
+ HdfsFileStatus stat = null;
+
+ // Proceed with the create, using the computed cipher suite and
// generated EDEK
- BlocksMapUpdateInfo toRemoveBlocks = null;
+ BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot create file" + src);
dir.writeLock();
try {
- src = dir.resolvePath(pc, src, pathComponents);
- final INodesInPath iip = dir.getINodesInPath4Write(src);
- toRemoveBlocks = startFileInternal(
- pc, iip, permissions, holder,
- clientMachine, create, overwrite,
- createParent, replication, blockSize,
- isLazyPersist, suite, protocolVersion, edek,
- logRetryCache);
- stat = FSDirStatAndListingOp.getFileInfo(
- dir, src, false, FSDirectory.isReservedRawName(srcArg), true);
+ stat = FSDirWriteFileOp.startFile(this, pc, src, permissions, holder,
+ clientMachine, flag, createParent,
+ replication, blockSize, ezInfo,
+ toRemoveBlocks, logRetryCache);
} finally {
dir.writeUnlock();
}
- } catch (StandbyException se) {
- skipSync = true;
- throw se;
+ } catch (IOException e) {
+ skipSync = e instanceof StandbyException;
+ throw e;
} finally {
writeUnlock();
// There might be transactions logged while trying to recover the lease.
// They need to be sync'ed even when an exception was thrown.
if (!skipSync) {
getEditLog().logSync();
- if (toRemoveBlocks != null) {
- removeBlocks(toRemoveBlocks);
- toRemoveBlocks.clear();
- }
+ removeBlocks(toRemoveBlocks);
+ toRemoveBlocks.clear();
}
}
- logAuditEvent(true, "create", srcArg, null, stat);
return stat;
}
/**
- * Create a new file or overwrite an existing file<br>
- *
- * Once the file is create the client then allocates a new block with the next
- * call using {@link ClientProtocol#addBlock}.
- * <p>
- * For description of parameters and exceptions thrown see
- * {@link ClientProtocol#create}
- */
- private BlocksMapUpdateInfo startFileInternal(FSPermissionChecker pc,
- INodesInPath iip, PermissionStatus permissions, String holder,
- String clientMachine, boolean create, boolean overwrite,
- boolean createParent, short replication, long blockSize,
- boolean isLazyPersist, CipherSuite suite, CryptoProtocolVersion version,
- EncryptedKeyVersion edek, boolean logRetryEntry)
- throws IOException {
- assert hasWriteLock();
- // Verify that the destination does not exist as a directory already.
- final INode inode = iip.getLastINode();
- final String src = iip.getPath();
- if (inode != null && inode.isDirectory()) {
- throw new FileAlreadyExistsException(src +
- " already exists as a directory");
- }
-
- final INodeFile myFile = INodeFile.valueOf(inode, src, true);
- if (isPermissionEnabled) {
- if (overwrite && myFile != null) {
- dir.checkPathAccess(pc, iip, FsAction.WRITE);
- }
- /*
- * To overwrite existing file, need to check 'w' permission
- * of parent (equals to ancestor in this case)
- */
- dir.checkAncestorAccess(pc, iip, FsAction.WRITE);
- }
- if (!createParent) {
- dir.verifyParentDir(iip, src);
- }
-
- FileEncryptionInfo feInfo = null;
-
- final EncryptionZone zone = dir.getEZForPath(iip);
- if (zone != null) {
- // The path is now within an EZ, but we're missing encryption parameters
- if (suite == null || edek == null) {
- throw new RetryStartFileException();
- }
- // Path is within an EZ and we have provided encryption parameters.
- // Make sure that the generated EDEK matches the settings of the EZ.
- final String ezKeyName = zone.getKeyName();
- if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
- throw new RetryStartFileException();
- }
- feInfo = new FileEncryptionInfo(suite, version,
- edek.getEncryptedKeyVersion().getMaterial(),
- edek.getEncryptedKeyIv(),
- ezKeyName, edek.getEncryptionKeyVersionName());
- }
-
- try {
- BlocksMapUpdateInfo toRemoveBlocks = null;
- if (myFile == null) {
- if (!create) {
- throw new FileNotFoundException("Can't overwrite non-existent " +
- src + " for client " + clientMachine);
- }
- } else {
- if (overwrite) {
- toRemoveBlocks = new BlocksMapUpdateInfo();
- List<INode> toRemoveINodes = new ChunkedArrayList<>();
- List<Long> toRemoveUCFiles = new ChunkedArrayList<>();
- long ret = FSDirDeleteOp.delete(
- dir, iip, toRemoveBlocks, toRemoveINodes,
- toRemoveUCFiles, now());
- if (ret >= 0) {
- iip = INodesInPath.replace(iip, iip.length() - 1, null);
- FSDirDeleteOp.incrDeletedFileCount(ret);
- removeLeasesAndINodes(toRemoveUCFiles, toRemoveINodes, true);
- }
- } else {
- // If lease soft limit time is expired, recover the lease
- recoverLeaseInternal(RecoverLeaseOp.CREATE_FILE,
- iip, src, holder, clientMachine, false);
- throw new FileAlreadyExistsException(src + " for client " +
- clientMachine + " already exists");
- }
- }
-
- checkFsObjectLimit();
- INodeFile newNode = null;
-
- // Always do an implicit mkdirs for parent directory tree.
- Map.Entry<INodesInPath, String> parent = FSDirMkdirOp
- .createAncestorDirectories(dir, iip, permissions);
- if (parent != null) {
- iip = dir.addFile(parent.getKey(), parent.getValue(), permissions,
- replication, blockSize, holder, clientMachine);
- newNode = iip != null ? iip.getLastINode().asFile() : null;
- }
-
- if (newNode == null) {
- throw new IOException("Unable to add " + src + " to namespace");
- }
- leaseManager.addLease(newNode.getFileUnderConstructionFeature()
- .getClientName(), newNode.getId());
-
- // Set encryption attributes if necessary
- if (feInfo != null) {
- dir.setFileEncryptionInfo(src, feInfo);
- newNode = dir.getInode(newNode.getId()).asFile();
- }
-
- setNewINodeStoragePolicy(newNode, iip, isLazyPersist);
-
- // record file record in log, record new generation stamp
- getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
- if (NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
- src + " inode " + newNode.getId() + " " + holder);
- }
- return toRemoveBlocks;
- } catch (IOException ie) {
- NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: " + src + " " +
- ie.getMessage());
- throw ie;
- }
- }
-
- private void setNewINodeStoragePolicy(INodeFile inode,
- INodesInPath iip,
- boolean isLazyPersist)
- throws IOException {
-
- if (isLazyPersist) {
- BlockStoragePolicy lpPolicy =
- blockManager.getStoragePolicy("LAZY_PERSIST");
-
- // Set LAZY_PERSIST storage policy if the flag was passed to
- // CreateFile.
- if (lpPolicy == null) {
- throw new HadoopIllegalArgumentException(
- "The LAZY_PERSIST storage policy has been disabled " +
- "by the administrator.");
- }
- inode.setStoragePolicyID(lpPolicy.getId(),
- iip.getLatestSnapshotId());
- } else {
- BlockStoragePolicy effectivePolicy =
- blockManager.getStoragePolicy(inode.getStoragePolicyID());
-
- if (effectivePolicy != null &&
- effectivePolicy.isCopyOnCreateFile()) {
- // Copy effective policy from ancestor directory to current file.
- inode.setStoragePolicyID(effectivePolicy.getId(),
- iip.getLatestSnapshotId());
- }
- }
- }
-
- /**
* Append to an existing file for append.
* <p>
*
@@ -2871,7 +2671,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return false;
}
- private enum RecoverLeaseOp {
+ enum RecoverLeaseOp {
CREATE_FILE,
APPEND_FILE,
TRUNCATE_FILE,
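
After the refactor, FSNamesystem#startFile is a thin wrapper: take the namesystem and
directory write locks, delegate the actual create to FSDirWriteFileOp.startFile, and
handle edit-log syncing on the way out. For reference, a minimal compilable toy of that
sync-on-exit pattern; the names (SyncOnExit, create, logSync) are invented stand-ins for
the real classes, not HDFS code:

    import java.io.IOException;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Toy reproduction of the pattern above; names are illustrative only.
    public class SyncOnExit {
      static class StandbyException extends IOException {}

      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      String create(String src, boolean standby) throws IOException {
        boolean skipSync = false;
        lock.writeLock().lock();
        try {
          if (standby) {
            throw new StandbyException(); // a standby has nothing to sync
          }
          return "created " + src;        // stands in for FSDirWriteFileOp.startFile
        } catch (IOException e) {
          skipSync = e instanceof StandbyException;
          throw e;
        } finally {
          lock.writeLock().unlock();
          if (!skipSync) {
            logSync(); // edits logged during lease recovery must be flushed
                       // even when the create itself threw
          }
        }
      }

      private void logSync() { /* flush the edit log; no-op in this toy */ }
    }

Note also that removeBlocks(toRemoveBlocks) is now unconditional: toRemoveBlocks is
allocated by the caller and passed into FSDirWriteFileOp.startFile as an argument, so
the null check the old version carried is no longer needed.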
[6/7] hadoop git commit: YARN-3646. Applications sometimes get stuck
when the retry policy is forever. Contributed by Raju Bairishetti.
Posted by ji...@apache.org.
YARN-3646. Applications sometimes get stuck when the retry policy is
forever. Contributed by Raju Bairishetti.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0305316d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0305316d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0305316d
Branch: refs/heads/HDFS-7240
Commit: 0305316d6932e6f1a05021354d77b6934e57e171
Parents: a5def58
Author: Devaraj K <de...@apache.org>
Authored: Thu May 21 20:14:44 2015 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Thu May 21 20:14:44 2015 +0530
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../yarn/client/api/impl/TestYarnClient.java | 32 ++++++++++++++++++++
.../org/apache/hadoop/yarn/client/RMProxy.java | 15 ++++-----
3 files changed, 43 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d1d2258..e5a9ee9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -538,6 +538,9 @@ Release 2.7.1 - UNRELEASED
YARN-3694. Fix dead link for TimelineServer REST API.
(Jagadesh Kiran N via aajisaka)
+ YARN-3646. Applications are getting stuck some times in case of retry
+ policy forever. (Raju Bairishetti via devaraj)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 511fa4a..bc40b9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1265,4 +1265,36 @@ public class TestYarnClient {
ReservationSystemTestUtil.reservationQ);
return request;
}
+
+ @Test(timeout = 30000, expected = ApplicationNotFoundException.class)
+ public void testShouldNotRetryForeverForNonNetworkExceptions() throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
+
+ ResourceManager rm = null;
+ YarnClient yarnClient = null;
+ try {
+ // start rm
+ rm = new ResourceManager();
+ rm.init(conf);
+ rm.start();
+
+ yarnClient = YarnClient.createYarnClient();
+ yarnClient.init(conf);
+ yarnClient.start();
+
+ // create invalid application id
+ ApplicationId appId = ApplicationId.newInstance(1430126768L, 10645);
+
+ // RM should throw ApplicationNotFoundException exception
+ yarnClient.getApplicationReport(appId);
+ } finally {
+ if (yarnClient != null) {
+ yarnClient.stop();
+ }
+ if (rm != null) {
+ rm.stop();
+ }
+ }
+ }
}
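
Read alongside the RMProxy hunk below: -1 for RESOURCEMANAGER_CONNECT_MAX_WAIT_MS puts
the client on the wait-forever path, and before this patch that path returned
RetryPolicies.RETRY_FOREVER outright, retrying every exception alike, so the lookup of
the invalid application id above would spin indefinitely instead of surfacing the RM's
ApplicationNotFoundException.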
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0305316d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index fa8d642..28628f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -224,19 +224,20 @@ public class RMProxy<T> {
failoverSleepBaseMs, failoverSleepMaxMs);
}
- if (waitForEver) {
- return RetryPolicies.RETRY_FOREVER;
- }
-
if (rmConnectionRetryIntervalMS < 0) {
throw new YarnRuntimeException("Invalid Configuration. " +
YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
" should not be negative.");
}
- RetryPolicy retryPolicy =
- RetryPolicies.retryUpToMaximumTimeWithFixedSleep(rmConnectWaitMS,
- rmConnectionRetryIntervalMS, TimeUnit.MILLISECONDS);
+ RetryPolicy retryPolicy = null;
+ if (waitForEver) {
+ retryPolicy = RetryPolicies.RETRY_FOREVER;
+ } else {
+ retryPolicy =
+ RetryPolicies.retryUpToMaximumTimeWithFixedSleep(rmConnectWaitMS,
+ rmConnectionRetryIntervalMS, TimeUnit.MILLISECONDS);
+ }
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
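
The hunk is cut off at the exception-to-policy map, but that map is the heart of the
fix: RETRY_FOREVER now sits behind a per-exception dispatch, so only the mapped
connection-type failures retry forever. A hedged sketch of the resulting shape; the
ConnectException entry and the TRY_ONCE_THEN_FAIL default are assumptions for
illustration, since the registered classes are not visible in this hunk:

    import java.net.ConnectException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    // Sketch only: the exact exception set RMProxy registers is assumed here.
    public class RetryShape {
      static RetryPolicy createRMConnectRetryPolicy(boolean waitForEver,
          long waitMs, long intervalMs) {
        RetryPolicy basePolicy = waitForEver
            ? RetryPolicies.RETRY_FOREVER
            : RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
                waitMs, intervalMs, TimeUnit.MILLISECONDS);
        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
        exceptionToPolicyMap.put(ConnectException.class, basePolicy);
        // Anything not in the map falls through to the default policy and
        // fails fast, e.g. ApplicationNotFoundException from a healthy RM.
        return RetryPolicies.retryByException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
      }
    }

With this shape, a server-side ApplicationNotFoundException misses the map and fails on
the first attempt, which is exactly what the new test asserts.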
[2/7] hadoop git commit: HADOOP-11772. RPC Invoker relies on static
ClientCache which has synchronized(this) blocks. Contributed by Haohui Mai.
Posted by ji...@apache.org.
HADOOP-11772. RPC Invoker relies on static ClientCache which has synchronized(this) blocks. Contributed by Haohui Mai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb6b38d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb6b38d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb6b38d6
Branch: refs/heads/HDFS-7240
Commit: fb6b38d67d8b997eca498fc5010b037e3081ace7
Parents: 6329bd0
Author: Haohui Mai <wh...@apache.org>
Authored: Wed May 20 20:10:50 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed May 20 20:10:50 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../main/java/org/apache/hadoop/ipc/Client.java | 106 ++++++-------------
2 files changed, 35 insertions(+), 74 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb6b38d6/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1624ce2..416b819 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -604,6 +604,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11970. Replace uses of ThreadLocal<Random> with JDK7
ThreadLocalRandom. (Sean Busbey via Colin P. McCabe)
+ HADOOP-11772. RPC Invoker relies on static ClientCache which has
+ synchronized(this) blocks. (wheat9)
+
BUG FIXES
HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
is an I/O error during requestShortCircuitShm (cmccabe)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb6b38d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index f28d8a2..feb811e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -43,6 +43,7 @@ import java.util.Iterator;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -56,6 +57,8 @@ import java.util.concurrent.atomic.AtomicLong;
import javax.net.SocketFactory;
import javax.security.sasl.Sasl;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -124,8 +127,8 @@ public class Client {
retryCount.set(rc);
}
- private Hashtable<ConnectionId, Connection> connections =
- new Hashtable<ConnectionId, Connection>();
+ private final Cache<ConnectionId, Connection> connections =
+ CacheBuilder.newBuilder().build();
private Class<? extends Writable> valueClass; // class of call values
private AtomicBoolean running = new AtomicBoolean(true); // if client runs
@@ -1167,13 +1170,7 @@ public class Client {
return;
}
- // release the resources
- // first thing to do;take the connection out of the connection list
- synchronized (connections) {
- if (connections.get(remoteId) == this) {
- connections.remove(remoteId);
- }
- }
+ connections.invalidate(remoteId);
// close the streams and therefore the socket
IOUtils.closeStream(out);
@@ -1260,14 +1257,12 @@ public class Client {
}
// wake up all connections
- synchronized (connections) {
- for (Connection conn : connections.values()) {
- conn.interrupt();
- }
+ for (Connection conn : connections.asMap().values()) {
+ conn.interrupt();
}
// wait until all connections are closed
- while (!connections.isEmpty()) {
+ while (connections.size() > 0) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
@@ -1283,56 +1278,12 @@ public class Client {
*/
public Writable call(Writable param, InetSocketAddress address)
throws IOException {
- return call(RPC.RpcKind.RPC_BUILTIN, param, address);
-
- }
- /** Make a call, passing <code>param</code>, to the IPC server running at
- * <code>address</code>, returning the value. Throws exceptions if there are
- * network problems or if the remote code threw an exception.
- * @deprecated Use {@link #call(RPC.RpcKind, Writable,
- * ConnectionId)} instead
- */
- @Deprecated
- public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress address)
- throws IOException {
- return call(rpcKind, param, address, null);
- }
-
- /** Make a call, passing <code>param</code>, to the IPC server running at
- * <code>address</code> with the <code>ticket</code> credentials, returning
- * the value.
- * Throws exceptions if there are network problems or if the remote code
- * threw an exception.
- * @deprecated Use {@link #call(RPC.RpcKind, Writable,
- * ConnectionId)} instead
- */
- @Deprecated
- public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr,
- UserGroupInformation ticket) throws IOException {
- ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
+ ConnectionId remoteId = ConnectionId.getConnectionId(address, null, null, 0,
conf);
- return call(rpcKind, param, remoteId);
- }
-
- /** Make a call, passing <code>param</code>, to the IPC server running at
- * <code>address</code> which is servicing the <code>protocol</code> protocol,
- * with the <code>ticket</code> credentials and <code>rpcTimeout</code> as
- * timeout, returning the value.
- * Throws exceptions if there are network problems or if the remote code
- * threw an exception.
- * @deprecated Use {@link #call(RPC.RpcKind, Writable,
- * ConnectionId)} instead
- */
- @Deprecated
- public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr,
- Class<?> protocol, UserGroupInformation ticket,
- int rpcTimeout) throws IOException {
- ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
- ticket, rpcTimeout, conf);
- return call(rpcKind, param, remoteId);
+ return call(RpcKind.RPC_BUILTIN, param, remoteId);
+
}
-
/**
* Same as {@link #call(RPC.RpcKind, Writable, InetSocketAddress,
* Class, UserGroupInformation, int, Configuration)}
@@ -1506,15 +1457,14 @@ public class Client {
@InterfaceAudience.Private
@InterfaceStability.Unstable
Set<ConnectionId> getConnectionIds() {
- synchronized (connections) {
- return connections.keySet();
- }
+ return connections.asMap().keySet();
}
/** Get a connection from the pool, or create a new one and add it to the
* pool. Connections to a given ConnectionId are reused. */
- private Connection getConnection(ConnectionId remoteId,
- Call call, int serviceClass, AtomicBoolean fallbackToSimpleAuth)
+ private Connection getConnection(
+ final ConnectionId remoteId,
+ Call call, final int serviceClass, AtomicBoolean fallbackToSimpleAuth)
throws IOException {
if (!running.get()) {
// the client is stopped
@@ -1525,15 +1475,23 @@ public class Client {
* connectionsId object and with set() method. We need to manage the
* refs for keys in HashMap properly. For now its ok.
*/
- do {
- synchronized (connections) {
- connection = connections.get(remoteId);
- if (connection == null) {
- connection = new Connection(remoteId, serviceClass);
- connections.put(remoteId, connection);
- }
+ while(true) {
+ try {
+ connection = connections.get(remoteId, new Callable<Connection>() {
+ @Override
+ public Connection call() throws Exception {
+ return new Connection(remoteId, serviceClass);
+ }
+ });
+ } catch (ExecutionException e) {
+ throw new IOException(e);
+ }
+ if (connection.addCall(call)) {
+ break;
+ } else {
+ connections.invalidate(remoteId);
}
- } while (!connection.addCall(call));
+ }
//we don't invoke the method below inside "synchronized (connections)"
//block above. The reason for that is if the server happens to be slow,
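
In short, the commit swaps the globally synchronized Hashtable for a Guava Cache, whose
get(key, loader) provides per-key get-or-create without holding a client-wide lock while
a Connection is being set up. A self-contained toy of the idiom, using a stub Connection
class rather than Hadoop's:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class CacheIdiom {
      static class Connection {
        final String id;
        Connection(String id) { this.id = id; }
      }

      public static void main(String[] args) throws ExecutionException {
        final Cache<String, Connection> connections =
            CacheBuilder.newBuilder().build();
        // The loader runs once per miss; concurrent callers for the same key
        // block until it finishes and then share the value, so no global lock
        // is held during construction.
        Connection c = connections.get("node-1", new Callable<Connection>() {
          @Override
          public Connection call() {
            return new Connection("node-1");
          }
        });
        System.out.println("cached " + c.id);
        connections.invalidate("node-1"); // what close() does in the patch
      }
    }

The patch keeps one behavioral nuance on top of this idiom: when addCall fails because a
cached Connection is shutting down, it invalidates the entry and loops, so the next
get() loads a fresh Connection.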
[5/7] hadoop git commit: YARN-3694. Fix dead link for TimelineServer
REST API. Contributed by Jagadesh Kiran N.
Posted by ji...@apache.org.
YARN-3694. Fix dead link for TimelineServer REST API. Contributed by Jagadesh Kiran N.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5def580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5def580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5def580
Branch: refs/heads/HDFS-7240
Commit: a5def580879428bc7af3c030ef33554e0519f072
Parents: 0e4f108
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu May 21 23:14:44 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu May 21 23:14:44 2015 +0900
----------------------------------------------------------------------
hadoop-project/src/site/site.xml | 2 +-
hadoop-yarn-project/CHANGES.txt | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5def580/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 7234881..f3bb458 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -134,7 +134,7 @@
<item name="Introduction" href="hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html"/>
<item name="Resource Manager" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html"/>
<item name="Node Manager" href="hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html"/>
- <item name="Timeline Server" href="TimelineServer.html#Timeline_Server_REST_API_v1"/>
+ <item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html#Timeline_Server_REST_API_v1"/>
</menu>
<menu name="Hadoop Compatible File Systems" inherit="top">
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5def580/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3cba027..d1d2258 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -535,6 +535,9 @@ Release 2.7.1 - UNRELEASED
YARN-3609. Load node labels from storage inside RM serviceStart. (Wangda
Tan via jianhe)
+ YARN-3694. Fix dead link for TimelineServer REST API.
+ (Jagadesh Kiran N via aajisaka)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES