You are viewing a plain text version of this content. The canonical link to the original message was a hyperlink in the HTML rendering and is not preserved in this plain-text export.
Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2017/08/01 15:47:24 UTC
[01/20] hadoop git commit: HDFS-12082. BlockInvalidateLimit value is
incorrectly set after namenode heartbeat interval reconfigured. Contributed
by Weiwei Yang. [Forced Update!]
Repository: hadoop
Updated Branches:
refs/heads/YARN-5734 fa2c5451f -> d3e2b6fd5 (forced update)
HDFS-12082. BlockInvalidateLimit value is incorrectly set after namenode heartbeat interval reconfigured. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e23415a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e23415a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e23415a
Branch: refs/heads/YARN-5734
Commit: 3e23415a92d43ce8818124f0b180227a52a33eaf
Parents: 0fd6d0f
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Jul 31 11:33:55 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Jul 31 11:33:55 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/DatanodeManager.java | 20 +++++++----
.../namenode/TestNameNodeReconfigure.java | 36 ++++++++++++++++++++
2 files changed, 50 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e23415a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 1d09751..2c5779a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -290,12 +290,19 @@ public class DatanodeManager {
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
+ 10 * 1000 * heartbeatIntervalSeconds;
- final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
+
+ // Effected block invalidate limit is the bigger value between
+ // value configured in hdfs-site.xml, and 20 * HB interval.
+ final int configuredBlockInvalidateLimit = conf.getInt(
+ DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
- this.blockInvalidateLimit = conf.getInt(
- DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
+ final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
+ this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
+ configuredBlockInvalidateLimit);
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
- + "=" + this.blockInvalidateLimit);
+ + ": configured=" + configuredBlockInvalidateLimit
+ + ", counted=" + countedBlockInvalidateLimit
+ + ", effected=" + blockInvalidateLimit);
this.checkIpHostnameInRegistration = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
@@ -403,7 +410,8 @@ public class DatanodeManager {
return fsClusterStats;
}
- int getBlockInvalidateLimit() {
+ @VisibleForTesting
+ public int getBlockInvalidateLimit() {
return blockInvalidateLimit;
}
@@ -1911,7 +1919,7 @@ public class DatanodeManager {
this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
* intervalSeconds;
this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
- DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+ blockInvalidateLimit);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e23415a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 6b553df..c0de63a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -40,6 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
public class TestNameNodeReconfigure {
@@ -48,10 +49,13 @@ public class TestNameNodeReconfigure {
.getLog(TestNameNodeReconfigure.class);
private MiniDFSCluster cluster;
+ private final int customizedBlockInvalidateLimit = 500;
@Before
public void setUp() throws IOException {
Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY,
+ customizedBlockInvalidateLimit);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
}
@@ -212,6 +216,38 @@ public class TestNameNodeReconfigure {
datanodeManager.getHeartbeatRecheckInterval());
}
+ @Test
+ public void testBlockInvalidateLimitAfterReconfigured()
+ throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final DatanodeManager datanodeManager = nameNode.namesystem
+ .getBlockManager().getDatanodeManager();
+
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
+ customizedBlockInvalidateLimit,
+ datanodeManager.getBlockInvalidateLimit());
+
+ nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+ Integer.toString(6));
+
+ // 20 * 6 = 120 < 500
+ // Invalid block limit should stay same as before after reconfiguration.
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ + " is not honored after reconfiguration",
+ customizedBlockInvalidateLimit,
+ datanodeManager.getBlockInvalidateLimit());
+
+ nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+ Integer.toString(50));
+
+ // 20 * 50 = 1000 > 500
+ // Invalid block limit should be reset to 1000
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ + " is not reconfigured correctly",
+ 1000,
+ datanodeManager.getBlockInvalidateLimit());
+ }
+
@After
public void shutDown() throws IOException {
if (cluster != null) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[05/20] hadoop git commit: HDFS-12154. Incorrect javadoc description
in StorageLocationChecker#check. Contributed by Nandakumar.
Posted by xg...@apache.org.
HDFS-12154. Incorrect javadoc description in StorageLocationChecker#check. Contributed by Nandakumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea568123
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea568123
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea568123
Branch: refs/heads/YARN-5734
Commit: ea568123fa76e4683d355a67be01b730d0c11068
Parents: 2be9412
Author: Weiwei Yang <ww...@apache.org>
Authored: Tue Jul 18 17:28:08 2017 +0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Jul 31 17:02:44 2017 -0700
----------------------------------------------------------------------
.../hdfs/server/datanode/checker/StorageLocationChecker.java | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea568123/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index 2d1eebe..81575e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -128,16 +128,16 @@ public class StorageLocationChecker {
}
/**
- * Initiate a check of the supplied storage volumes and return
- * a list of failed volumes.
+ * Initiate a check on the supplied storage volumes and return
+ * a list of healthy volumes.
*
* StorageLocations are returned in the same order as the input
* for compatibility with existing unit tests.
*
* @param conf HDFS configuration.
* @param dataDirs list of volumes to check.
- * @return returns a list of failed volumes. Returns the empty list if
- * there are no failed volumes.
+ * @return returns a list of healthy volumes. Returns an empty list if
+ * there are no healthy volumes.
*
* @throws InterruptedException if the check was interrupted.
* @throws IOException if the number of failed volumes exceeds the
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[12/20] hadoop git commit: YARN-5951. Changes to allow
CapacityScheduler to use configuration store
Posted by xg...@apache.org.
YARN-5951. Changes to allow CapacityScheduler to use configuration store
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe6832ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe6832ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe6832ec
Branch: refs/heads/YARN-5734
Commit: fe6832ec5f09468c4eaef1ec0afc9563fea010fe
Parents: b38a1ee
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:35 2017 -0700
----------------------------------------------------------------------
.../scheduler/capacity/CapacityScheduler.java | 36 +++++------
.../CapacitySchedulerConfiguration.java | 10 +++
.../capacity/conf/CSConfigurationProvider.java | 46 ++++++++++++++
.../conf/FileBasedCSConfigurationProvider.java | 67 ++++++++++++++++++++
.../scheduler/capacity/conf/package-info.java | 29 +++++++++
.../capacity/TestCapacityScheduler.java | 4 +-
6 files changed, 170 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6832ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 2ccaf63..a6feb09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
-import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
@@ -105,6 +104,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -163,6 +164,7 @@ public class CapacityScheduler extends
private int offswitchPerHeartbeatLimit;
+ private CSConfigurationProvider csConfProvider;
@Override
public void setConf(Configuration conf) {
@@ -286,7 +288,18 @@ public class CapacityScheduler extends
IOException {
try {
writeLock.lock();
- this.conf = loadCapacitySchedulerConfiguration(configuration);
+ String confProviderStr = configuration.get(
+ CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+ CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+ if (confProviderStr.equals(
+ CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+ this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+ } else {
+ throw new IOException("Invalid CS configuration provider: " +
+ confProviderStr);
+ }
+ this.csConfProvider.init(configuration);
+ this.conf = this.csConfProvider.loadConfiguration(configuration);
validateConf(this.conf);
this.minimumAllocation = this.conf.getMinimumAllocation();
initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -393,7 +406,7 @@ public class CapacityScheduler extends
writeLock.lock();
Configuration configuration = new Configuration(newConf);
CapacitySchedulerConfiguration oldConf = this.conf;
- this.conf = loadCapacitySchedulerConfiguration(configuration);
+ this.conf = csConfProvider.loadConfiguration(configuration);
validateConf(this.conf);
try {
LOG.info("Re-initializing queues...");
@@ -1777,23 +1790,6 @@ public class CapacityScheduler extends
return true;
}
- private CapacitySchedulerConfiguration loadCapacitySchedulerConfiguration(
- Configuration configuration) throws IOException {
- try {
- InputStream CSInputStream =
- this.rmContext.getConfigurationProvider()
- .getConfigurationInputStream(configuration,
- YarnConfiguration.CS_CONFIGURATION_FILE);
- if (CSInputStream != null) {
- configuration.addResource(CSInputStream);
- return new CapacitySchedulerConfiguration(configuration, false);
- }
- return new CapacitySchedulerConfiguration(configuration, true);
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
-
private String getDefaultReservationQueueName(String planQueueName) {
return planQueueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6832ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 1e29d50..ac1a1d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -301,6 +301,16 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
@Private
public static final boolean DEFAULT_LAZY_PREEMPTION_ENABLED = false;
+ @Private
+ public static final String CS_CONF_PROVIDER = PREFIX
+ + "configuration.provider";
+
+ @Private
+ public static final String FILE_CS_CONF_PROVIDER = "file";
+
+ @Private
+ public static final String DEFAULT_CS_CONF_PROVIDER = FILE_CS_CONF_PROVIDER;
+
AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser();
public CapacitySchedulerConfiguration() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6832ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
new file mode 100644
index 0000000..c9984ac
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+
+import java.io.IOException;
+
+/**
+ * Configuration provider for {@link CapacityScheduler}.
+ */
+public interface CSConfigurationProvider {
+
+ /**
+ * Initialize the configuration provider with given conf.
+ * @param conf configuration to initialize with
+ */
+ void init(Configuration conf);
+
+ /**
+ * Loads capacity scheduler configuration object.
+ * @param conf initial bootstrap configuration
+ * @return CS configuration
+ * @throws IOException if fail to retrieve configuration
+ */
+ CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
+ throws IOException;
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6832ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
new file mode 100644
index 0000000..51c64fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FileBasedCSConfigurationProvider.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * {@link CapacityScheduler} configuration provider based on local
+ * {@code capacity-scheduler.xml} file.
+ */
+public class FileBasedCSConfigurationProvider implements
+ CSConfigurationProvider {
+
+ private RMContext rmContext;
+
+ /**
+ * Construct file based CS configuration provider with given context.
+ * @param rmContext the RM context
+ */
+ public FileBasedCSConfigurationProvider(RMContext rmContext) {
+ this.rmContext = rmContext;
+ }
+
+ @Override
+ public void init(Configuration conf) {}
+
+ @Override
+ public CapacitySchedulerConfiguration loadConfiguration(Configuration conf)
+ throws IOException {
+ try {
+ InputStream csInputStream =
+ this.rmContext.getConfigurationProvider()
+ .getConfigurationInputStream(conf,
+ YarnConfiguration.CS_CONFIGURATION_FILE);
+ if (csInputStream != null) {
+ conf.addResource(csInputStream);
+ return new CapacitySchedulerConfiguration(conf, false);
+ }
+ return new CapacitySchedulerConfiguration(conf, true);
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6832ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
new file mode 100644
index 0000000..08d0522
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package
+ * org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf
+ * contains classes related to capacity scheduler configuration management.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe6832ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0642cd9..b8af469 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -236,13 +236,13 @@ public class TestCapacityScheduler {
@Test (timeout = 30000)
public void testConfValidation() throws Exception {
- ResourceScheduler scheduler = new CapacityScheduler();
+ CapacityScheduler scheduler = new CapacityScheduler();
scheduler.setRMContext(resourceManager.getRMContext());
Configuration conf = new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
try {
- scheduler.reinitialize(conf, mockContext);
+ scheduler.init(conf);
fail("Exception is expected because the min memory allocation is" +
" larger than the max memory allocation.");
} catch (YarnRuntimeException e) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[09/20] hadoop git commit: Revert "YARN-6873. Moving logging APIs
over to slf4j in hadoop-yarn-server-applicationhistoryservice. Contributed by
Yeliang Cang."
Posted by xg...@apache.org.
Revert "YARN-6873. Moving logging APIs over to slf4j in hadoop-yarn-server-applicationhistoryservice. Contributed by Yeliang Cang."
This reverts commit 1a78c0ff016097930edf68e8278f826b637e918c.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4aa1cb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4aa1cb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4aa1cb4
Branch: refs/heads/YARN-5734
Commit: a4aa1cb40504299d3401008fdabc795eafb28713
Parents: 9586b0e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Aug 1 12:12:25 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Aug 1 12:15:03 2017 +0900
----------------------------------------------------------------------
.../ApplicationHistoryClientService.java | 8 ++---
.../ApplicationHistoryManagerImpl.java | 8 ++---
...pplicationHistoryManagerOnTimelineStore.java | 8 ++---
.../ApplicationHistoryServer.java | 10 +++---
.../FileSystemApplicationHistoryStore.java | 22 ++++++------
.../webapp/AHSWebServices.java | 7 ++--
.../webapp/NavBlock.java | 6 ++--
.../timeline/KeyValueBasedTimelineStore.java | 8 ++---
.../server/timeline/LeveldbTimelineStore.java | 35 ++++++++++----------
.../yarn/server/timeline/RollingLevelDB.java | 15 ++++-----
.../timeline/RollingLevelDBTimelineStore.java | 22 ++++++------
.../server/timeline/TimelineDataManager.java | 7 ++--
.../recovery/LeveldbTimelineStateStore.java | 30 ++++++++---------
.../timeline/security/TimelineACLsManager.java | 7 ++--
...lineDelegationTokenSecretManagerService.java | 8 ++---
.../timeline/webapp/TimelineWebServices.java | 7 ++--
.../TestFileSystemApplicationHistoryStore.java | 8 ++---
.../timeline/TestLeveldbTimelineStore.java | 2 +-
18 files changed, 107 insertions(+), 111 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
index 7d57048..73d5d39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -22,6 +22,8 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -59,13 +61,11 @@ import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class ApplicationHistoryClientService extends AbstractService implements
ApplicationHistoryProtocol {
- private static final Logger LOG =
- LoggerFactory.getLogger(ApplicationHistoryClientService.class);
+ private static final Log LOG = LogFactory
+ .getLog(ApplicationHistoryClientService.class);
private ApplicationHistoryManager history;
private Server server;
private InetSocketAddress bindAddress;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
index b8931d8..130bb32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
@@ -23,6 +23,8 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
@@ -40,13 +42,11 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.Container
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class ApplicationHistoryManagerImpl extends AbstractService implements
ApplicationHistoryManager {
- private static final Logger LOG =
- LoggerFactory.getLogger(ApplicationHistoryManagerImpl.class);
+ private static final Log LOG = LogFactory
+ .getLog(ApplicationHistoryManagerImpl.class);
private static final String UNAVAILABLE = "N/A";
private ApplicationHistoryStore historyStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 9240ed8..5404338 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -28,6 +28,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -67,14 +69,12 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
implements
ApplicationHistoryManager {
- private static final Logger LOG = LoggerFactory
- .getLogger(ApplicationHistoryManagerOnTimelineStore.class);
+ private static final Log LOG = LogFactory
+ .getLog(ApplicationHistoryManagerOnTimelineStore.class);
@VisibleForTesting
static final String UNAVAILABLE = "N/A";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 85e5f2d..6e6e98b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -22,6 +22,8 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
@@ -58,8 +60,6 @@ import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.webapp.WebAppContext;
import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* History server that keeps track of all types of history in the cluster.
@@ -68,8 +68,8 @@ import org.slf4j.LoggerFactory;
public class ApplicationHistoryServer extends CompositeService {
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
- private static final Logger LOG = LoggerFactory
- .getLogger(ApplicationHistoryServer.class);
+ private static final Log LOG = LogFactory
+ .getLog(ApplicationHistoryServer.class);
private ApplicationHistoryClientService ahsClientService;
private ApplicationACLsManager aclsManager;
@@ -178,7 +178,7 @@ public class ApplicationHistoryServer extends CompositeService {
appHistoryServer.init(conf);
appHistoryServer.start();
} catch (Throwable t) {
- LOG.error("Error starting ApplicationHistoryServer", t);
+ LOG.fatal("Error starting ApplicationHistoryServer", t);
ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
}
return appHistoryServer;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
index fa2da44..be7bc6d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
@@ -30,6 +30,8 @@ import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@@ -72,8 +74,6 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.C
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.protobuf.InvalidProtocolBufferException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* File system implementation of {@link ApplicationHistoryStore}. In this
@@ -89,8 +89,8 @@ import org.slf4j.LoggerFactory;
public class FileSystemApplicationHistoryStore extends AbstractService
implements ApplicationHistoryStore {
- private static final Logger LOG = LoggerFactory
- .getLogger(FileSystemApplicationHistoryStore.class);
+ private static final Log LOG = LogFactory
+ .getLog(FileSystemApplicationHistoryStore.class);
private static final String ROOT_DIR_NAME = "ApplicationHistoryDataRoot";
private static final int MIN_BLOCK_SIZE = 256 * 1024;
@@ -141,7 +141,7 @@ public class FileSystemApplicationHistoryStore extends AbstractService
}
outstandingWriters.clear();
} finally {
- IOUtils.cleanupWithLogger(LOG, fs);
+ IOUtils.cleanup(LOG, fs);
}
super.serviceStop();
}
@@ -711,12 +711,12 @@ public class FileSystemApplicationHistoryStore extends AbstractService
}
public void reset() throws IOException {
- IOUtils.cleanupWithLogger(LOG, scanner);
+ IOUtils.cleanup(LOG, scanner);
scanner = reader.createScanner();
}
public void close() {
- IOUtils.cleanupWithLogger(LOG, scanner, reader, fsdis);
+ IOUtils.cleanup(LOG, scanner, reader, fsdis);
}
}
@@ -740,13 +740,13 @@ public class FileSystemApplicationHistoryStore extends AbstractService
YarnConfiguration.DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE), null,
getConfig());
} catch (IOException e) {
- IOUtils.cleanupWithLogger(LOG, fsdos);
+ IOUtils.cleanup(LOG, fsdos);
throw e;
}
}
public synchronized void close() {
- IOUtils.cleanupWithLogger(LOG, writer, fsdos);
+ IOUtils.cleanup(LOG, writer, fsdos);
}
public synchronized void writeHistoryData(HistoryDataKey key, byte[] value)
@@ -756,13 +756,13 @@ public class FileSystemApplicationHistoryStore extends AbstractService
dos = writer.prepareAppendKey(-1);
key.write(dos);
} finally {
- IOUtils.cleanupWithLogger(LOG, dos);
+ IOUtils.cleanup(LOG, dos);
}
try {
dos = writer.prepareAppendValue(value.length);
dos.write(value);
} finally {
- IOUtils.cleanupWithLogger(LOG, dos);
+ IOUtils.cleanup(LOG, dos);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 13410a8..6195199 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -42,6 +42,8 @@ import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -78,15 +80,12 @@ import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.UniformInterfaceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
@Singleton
@Path("/ws/v1/applicationhistory")
public class AHSWebServices extends WebServices {
- private static final Logger LOG = LoggerFactory
- .getLogger(AHSWebServices.class);
+ private static final Log LOG = LogFactory.getLog(AHSWebServices.class);
private static final String NM_DOWNLOAD_URI_STR =
"/ws/v1/node/containers";
private static final Joiner JOINER = Joiner.on("");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index a260634..3ee4dd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -18,20 +18,20 @@
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class NavBlock extends HtmlBlock {
@Override
public void render(Block html) {
boolean addErrorsAndWarningsLink = false;
- Logger log = LoggerFactory.getLogger(NavBlock.class);
+ Log log = LogFactory.getLog(NavBlock.class);
if (log instanceof Log4JLogger) {
Log4jWarningErrorMetricsAppender appender =
Log4jWarningErrorMetricsAppender.findAppender();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
index 82db770..79e2bf2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.timeline;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.AbstractService;
@@ -31,8 +33,6 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEnt
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
@@ -71,8 +71,8 @@ abstract class KeyValueBasedTimelineStore
private boolean serviceStopped = false;
- private static final Logger LOG
- = LoggerFactory.getLogger(KeyValueBasedTimelineStore.class);
+ private static final Log LOG
+ = LogFactory.getLog(KeyValueBasedTimelineStore.class);
public KeyValueBasedTimelineStore() {
super(KeyValueBasedTimelineStore.class.getName());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index e3db1dc..ffe0413 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -22,6 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
@@ -46,7 +48,6 @@ import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.*;
-import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@@ -117,8 +118,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
@InterfaceStability.Unstable
public class LeveldbTimelineStore extends AbstractService
implements TimelineStore {
- private static final org.slf4j.Logger LOG = LoggerFactory
- .getLogger(LeveldbTimelineStore.class);
+ private static final Log LOG = LogFactory
+ .getLog(LeveldbTimelineStore.class);
@Private
@VisibleForTesting
@@ -239,7 +240,7 @@ public class LeveldbTimelineStore extends AbstractService
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
- IOUtils.cleanupWithLogger(LOG, localFS);
+ IOUtils.cleanup(LOG, localFS);
}
LOG.info("Using leveldb path " + dbPath);
try {
@@ -283,7 +284,7 @@ public class LeveldbTimelineStore extends AbstractService
" closing db now", e);
}
}
- IOUtils.cleanupWithLogger(LOG, db);
+ IOUtils.cleanup(LOG, db);
super.serviceStop();
}
@@ -319,7 +320,7 @@ public class LeveldbTimelineStore extends AbstractService
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
- LOG.error(e.toString());
+ LOG.error(e);
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
@@ -393,7 +394,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
}
@@ -569,7 +570,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
return events;
}
@@ -752,7 +753,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
}
@@ -924,7 +925,7 @@ public class LeveldbTimelineStore extends AbstractService
} finally {
lock.unlock();
writeLocks.returnLock(lock);
- IOUtils.cleanupWithLogger(LOG, writeBatch);
+ IOUtils.cleanup(LOG, writeBatch);
}
for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
@@ -1375,7 +1376,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
}
@@ -1505,7 +1506,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, writeBatch);
+ IOUtils.cleanup(LOG, writeBatch);
}
}
@@ -1547,7 +1548,7 @@ public class LeveldbTimelineStore extends AbstractService
LOG.error("Got IOException while deleting entities for type " +
entityType + ", continuing to next type", e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator, pfIterator);
+ IOUtils.cleanup(LOG, iterator, pfIterator);
deleteLock.writeLock().unlock();
if (typeCount > 0) {
LOG.info("Deleted " + typeCount + " entities of type " +
@@ -1628,7 +1629,7 @@ public class LeveldbTimelineStore extends AbstractService
String incompatibleMessage =
"Incompatible version for timeline store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
- LOG.error(incompatibleMessage);
+ LOG.fatal(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
@@ -1717,7 +1718,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, writeBatch);
+ IOUtils.cleanup(LOG, writeBatch);
}
}
@@ -1754,7 +1755,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
}
@@ -1804,7 +1805,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
index 5c511a3..6d10671 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
@@ -33,6 +33,8 @@ import java.util.Map.Entry;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -43,8 +45,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Contains the logic to lookup a leveldb by timestamp so that multiple smaller
@@ -54,8 +54,7 @@ import org.slf4j.LoggerFactory;
class RollingLevelDB {
/** Logger for this class. */
- private static final Logger LOG = LoggerFactory.
- getLogger(RollingLevelDB.class);
+ private static final Log LOG = LogFactory.getLog(RollingLevelDB.class);
/** Factory to open and create new leveldb instances. */
private static JniDBFactory factory = new JniDBFactory();
/** Thread safe date formatter. */
@@ -152,7 +151,7 @@ class RollingLevelDB {
}
public void close() {
- IOUtils.cleanupWithLogger(LOG, writeBatch);
+ IOUtils.cleanup(LOG, writeBatch);
}
}
@@ -347,7 +346,7 @@ class RollingLevelDB {
.iterator();
while (iterator.hasNext()) {
Entry<Long, DB> entry = iterator.next();
- IOUtils.cleanupWithLogger(LOG, entry.getValue());
+ IOUtils.cleanup(LOG, entry.getValue());
String dbName = fdf.format(entry.getKey());
Path path = new Path(rollingDBPath, getName() + "." + dbName);
try {
@@ -362,9 +361,9 @@ class RollingLevelDB {
public void stop() throws Exception {
for (DB db : rollingdbs.values()) {
- IOUtils.cleanupWithLogger(LOG, db);
+ IOUtils.cleanup(LOG, db);
}
- IOUtils.cleanupWithLogger(LOG, lfs);
+ IOUtils.cleanup(LOG, lfs);
}
private long computeNextCheckMillis(long now) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 1ac170c..00f6630 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -38,6 +38,8 @@ import java.util.TreeMap;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
@@ -74,8 +76,6 @@ import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.WriteBatch;
import org.nustaq.serialization.FSTConfiguration;
import org.nustaq.serialization.FSTClazzNameRegistry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import static java.nio.charset.StandardCharsets.UTF_8;
@@ -168,8 +168,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
@InterfaceStability.Unstable
public class RollingLevelDBTimelineStore extends AbstractService implements
TimelineStore {
- private static final Logger LOG = LoggerFactory
- .getLogger(RollingLevelDBTimelineStore.class);
+ private static final Log LOG = LogFactory
+ .getLog(RollingLevelDBTimelineStore.class);
private static FSTConfiguration fstConf =
FSTConfiguration.createDefaultConfiguration();
// Fall back to 2.24 parsing if 2.50 parsing fails
@@ -368,9 +368,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
+ " closing db now", e);
}
}
- IOUtils.cleanupWithLogger(LOG, domaindb);
- IOUtils.cleanupWithLogger(LOG, starttimedb);
- IOUtils.cleanupWithLogger(LOG, ownerdb);
+ IOUtils.cleanup(LOG, domaindb);
+ IOUtils.cleanup(LOG, starttimedb);
+ IOUtils.cleanup(LOG, ownerdb);
entitydb.stop();
indexdb.stop();
super.serviceStop();
@@ -399,7 +399,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
- LOG.error(e.toString());
+ LOG.error(e);
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
@@ -1525,7 +1525,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
+ ". Total start times deleted so far this cycle: "
+ startTimesCount);
}
- IOUtils.cleanupWithLogger(LOG, writeBatch);
+ IOUtils.cleanup(LOG, writeBatch);
writeBatch = starttimedb.createWriteBatch();
batchSize = 0;
}
@@ -1545,7 +1545,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
LOG.info("Deleted " + startTimesCount + "/" + totalCount
+ " start time entities earlier than " + minStartTime);
} finally {
- IOUtils.cleanupWithLogger(LOG, writeBatch);
+ IOUtils.cleanup(LOG, writeBatch);
}
return startTimesCount;
}
@@ -1622,7 +1622,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
String incompatibleMessage = "Incompatible version for timeline store: "
+ "expecting version " + getCurrentVersion()
+ ", but loading version " + loadedVersion;
- LOG.error(incompatibleMessage);
+ LOG.fatal(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
index 56b71fa..57a9346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
@@ -26,6 +26,8 @@ import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
@@ -43,8 +45,6 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* The class wrap over the timeline store and the ACLs manager. It does some non
@@ -54,8 +54,7 @@ import org.slf4j.LoggerFactory;
*/
public class TimelineDataManager extends AbstractService {
- private static final Logger LOG =
- LoggerFactory.getLogger(TimelineDataManager.class);
+ private static final Log LOG = LogFactory.getLog(TimelineDataManager.class);
@VisibleForTesting
public static final String DEFAULT_DOMAIN_ID = "DEFAULT";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
index bcd57ef..b62a541 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
@@ -28,6 +28,8 @@ import java.io.File;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -48,8 +50,6 @@ import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
@@ -60,8 +60,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
public class LeveldbTimelineStateStore extends
TimelineStateStore {
- public static final Logger LOG =
- LoggerFactory.getLogger(LeveldbTimelineStateStore.class);
+ public static final Log LOG =
+ LogFactory.getLog(LeveldbTimelineStateStore.class);
private static final String DB_NAME = "timeline-state-store.ldb";
private static final FsPermission LEVELDB_DIR_UMASK = FsPermission
@@ -103,7 +103,7 @@ public class LeveldbTimelineStateStore extends
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
- IOUtils.cleanupWithLogger(LOG, localFS);
+ IOUtils.cleanup(LOG, localFS);
}
JniDBFactory factory = new JniDBFactory();
try {
@@ -131,7 +131,7 @@ public class LeveldbTimelineStateStore extends
@Override
protected void closeStorage() throws IOException {
- IOUtils.cleanupWithLogger(LOG, db);
+ IOUtils.cleanup(LOG, db);
}
@Override
@@ -168,8 +168,8 @@ public class LeveldbTimelineStateStore extends
} catch (DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, ds);
- IOUtils.cleanupWithLogger(LOG, batch);
+ IOUtils.cleanup(LOG, ds);
+ IOUtils.cleanup(LOG, batch);
}
}
@@ -239,7 +239,7 @@ public class LeveldbTimelineStateStore extends
key.write(dataStream);
dataStream.close();
} finally {
- IOUtils.cleanupWithLogger(LOG, dataStream);
+ IOUtils.cleanup(LOG, dataStream);
}
return memStream.toByteArray();
}
@@ -253,7 +253,7 @@ public class LeveldbTimelineStateStore extends
try {
key.readFields(in);
} finally {
- IOUtils.cleanupWithLogger(LOG, in);
+ IOUtils.cleanup(LOG, in);
}
state.tokenMasterKeyState.add(key);
}
@@ -267,7 +267,7 @@ public class LeveldbTimelineStateStore extends
try {
data.readFields(in);
} finally {
- IOUtils.cleanupWithLogger(LOG, in);
+ IOUtils.cleanup(LOG, in);
}
state.tokenState.put(data.getTokenIdentifier(), data.getRenewDate());
}
@@ -290,7 +290,7 @@ public class LeveldbTimelineStateStore extends
++numKeys;
}
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
return numKeys;
}
@@ -314,7 +314,7 @@ public class LeveldbTimelineStateStore extends
} catch (DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(LOG, iterator);
+ IOUtils.cleanup(LOG, iterator);
}
return numTokens;
}
@@ -332,7 +332,7 @@ public class LeveldbTimelineStateStore extends
try {
state.latestSequenceNumber = in.readInt();
} finally {
- IOUtils.cleanupWithLogger(LOG, in);
+ IOUtils.cleanup(LOG, in);
}
}
}
@@ -412,7 +412,7 @@ public class LeveldbTimelineStateStore extends
String incompatibleMessage =
"Incompatible version for timeline state store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
- LOG.error(incompatibleMessage);
+ LOG.fatal(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
index 6c32eec..25252fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
@@ -24,6 +24,8 @@ import java.util.HashMap;
import java.util.Map;
import org.apache.commons.collections.map.LRUMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
@@ -39,8 +41,6 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.util.StringHelper;
import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* <code>TimelineACLsManager</code> check the entity level timeline data access.
@@ -48,8 +48,7 @@ import org.slf4j.LoggerFactory;
@Private
public class TimelineACLsManager {
- private static final Logger LOG = LoggerFactory.
- getLogger(TimelineACLsManager.class);
+ private static final Log LOG = LogFactory.getLog(TimelineACLsManager.class);
private static final int DOMAIN_ACCESS_ENTRY_CACHE_SIZE = 100;
private AdminACLsManager adminAclsManager;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
index 0c6892a..60a0348 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.timeline.security;
import java.io.IOException;
import java.util.Map.Entry;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@@ -33,8 +35,6 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* The service wrapper of {@link TimelineDelegationTokenSecretManager}
@@ -118,8 +118,8 @@ public class TimelineDelegationTokenSecretManagerService extends
public static class TimelineDelegationTokenSecretManager extends
AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
- public static final Logger LOG =
- LoggerFactory.getLogger(TimelineDelegationTokenSecretManager.class);
+ public static final Log LOG =
+ LogFactory.getLog(TimelineDelegationTokenSecretManager.class);
private TimelineStateStore stateStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index be8e3c5..ad4e2bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -43,6 +43,8 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.JettyUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
@@ -66,16 +68,13 @@ import org.apache.hadoop.yarn.webapp.NotFoundException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
@Singleton
@Path("/ws/v1/timeline")
//TODO: support XML serialization/deserialization
public class TimelineWebServices {
- private static final Logger LOG = LoggerFactory
- .getLogger(TimelineWebServices.class);
+ private static final Log LOG = LogFactory.getLog(TimelineWebServices.class);
private TimelineDataManager timelineDataManager;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index df4adbe..15a00d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -32,6 +32,8 @@ import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -49,14 +51,12 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class TestFileSystemApplicationHistoryStore extends
ApplicationHistoryStoreTestUtils {
- private static final Logger LOG = LoggerFactory
- .getLogger(TestFileSystemApplicationHistoryStore.class.getName());
+ private static Log LOG = LogFactory
+ .getLog(TestFileSystemApplicationHistoryStore.class.getName());
private FileSystem fs;
private Path fsWorkingPath;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4aa1cb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
index f68a1c4..0c292d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
@@ -160,7 +160,7 @@ public class TestLeveldbTimelineStore extends TimelineStoreTestUtils {
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanupWithLogger(null, iterator, pfIterator);
+ IOUtils.cleanup(null, iterator, pfIterator);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[07/20] hadoop git commit: HADOOP-14343. Wrong pid file name in error
message when starting secure daemon
Posted by xg...@apache.org.
HADOOP-14343. Wrong pid file name in error message when starting secure daemon
Signed-off-by: Allen Wittenauer <aw...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abbf4129
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abbf4129
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abbf4129
Branch: refs/heads/YARN-5734
Commit: abbf4129a24c99fbce6d70b191ec19cf0d17e9be
Parents: 1a78c0f
Author: Andras Bokor <bo...@freemail.hu>
Authored: Mon Jul 31 20:03:43 2017 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon Jul 31 20:03:43 2017 -0700
----------------------------------------------------------------------
.../hadoop-common/src/main/bin/hadoop-functions.sh | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/abbf4129/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 8ac1b0c..2744643 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1873,11 +1873,9 @@ function hadoop_start_secure_daemon_wrapper
(( counter++ ))
done
- # this is for the daemon pid creation
#shellcheck disable=SC2086
- echo $! > "${jsvcpidfile}" 2>/dev/null
- if [[ $? -gt 0 ]]; then
- hadoop_error "ERROR: Cannot write ${daemonname} pid ${daemonpidfile}."
+ if ! echo $! > "${jsvcpidfile}"; then
+ hadoop_error "ERROR: Cannot write ${daemonname} pid ${jsvcpidfile}."
fi
sleep 1
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[13/20] hadoop git commit: YARN-5946: Create YarnConfigurationStore
interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung
Posted by xg...@apache.org.
YARN-5946: Create YarnConfigurationStore interface and
InMemoryConfigurationStore class. Contributed by Jonathan Hung
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6023666b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6023666b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6023666b
Branch: refs/heads/YARN-5734
Commit: 6023666bd100f6b9f553f2ffc5e0ed6ed206f239
Parents: fe6832e
Author: Xuan <xg...@apache.org>
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:36 2017 -0700
----------------------------------------------------------------------
.../conf/InMemoryConfigurationStore.java | 86 +++++++++++
.../capacity/conf/YarnConfigurationStore.java | 154 +++++++++++++++++++
.../conf/TestYarnConfigurationStore.java | 70 +++++++++
3 files changed, 310 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6023666b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 0000000..a208fb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+ private Configuration schedConf;
+ private LinkedList<LogMutation> pendingMutations;
+ private long pendingId;
+
+ @Override
+ public void initialize(Configuration conf, Configuration schedConf) {
+ this.schedConf = schedConf;
+ this.pendingMutations = new LinkedList<>();
+ this.pendingId = 0;
+ }
+
+ @Override
+ public synchronized long logMutation(LogMutation logMutation) {
+ logMutation.setId(++pendingId);
+ pendingMutations.add(logMutation);
+ return pendingId;
+ }
+
+ @Override
+ public synchronized boolean confirmMutation(long id, boolean isValid) {
+ LogMutation mutation = pendingMutations.poll();
+ // If confirmMutation is called out of order, discard mutations until id
+ // is reached.
+ while (mutation != null) {
+ if (mutation.getId() == id) {
+ if (isValid) {
+ Map<String, String> mutations = mutation.getUpdates();
+ for (Map.Entry<String, String> kv : mutations.entrySet()) {
+ schedConf.set(kv.getKey(), kv.getValue());
+ }
+ }
+ return true;
+ }
+ mutation = pendingMutations.poll();
+ }
+ return false;
+ }
+
+ @Override
+ public synchronized Configuration retrieve() {
+ return schedConf;
+ }
+
+ @Override
+ public synchronized List<LogMutation> getPendingMutations() {
+ return pendingMutations;
+ }
+
+ @Override
+ public List<LogMutation> getConfirmedConfHistory(long fromId) {
+ // Unimplemented.
+ return null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6023666b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
new file mode 100644
index 0000000..22c0ef8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * YarnConfigurationStore exposes the methods needed for retrieving and
+ * persisting {@link CapacityScheduler} configuration via key-value
+ * using write-ahead logging. When configuration mutation is requested, caller
+ * should first log it with {@code logMutation}, which persists this pending
+ * mutation. This mutation is merged to the persisted configuration only after
+ * {@code confirmMutation} is called.
+ *
+ * On startup/recovery, caller should call {@code retrieve} to get all
+ * confirmed mutations, then get pending mutations which were not confirmed via
+ * {@code getPendingMutations}, and replay/confirm them via
+ * {@code confirmMutation} as in the normal case.
+ */
+public interface YarnConfigurationStore {
+
+ /**
+ * LogMutation encapsulates the fields needed for configuration mutation
+ * audit logging and recovery.
+ */
+ class LogMutation {
+ private Map<String, String> updates;
+ private String user;
+ private long id;
+
+ /**
+ * Create log mutation prior to logging.
+ * @param updates key-value configuration updates
+ * @param user user who requested configuration change
+ */
+ public LogMutation(Map<String, String> updates, String user) {
+ this(updates, user, 0);
+ }
+
+ /**
+ * Create log mutation for recovery.
+ * @param updates key-value configuration updates
+ * @param user user who requested configuration change
+ * @param id transaction id of configuration change
+ */
+ LogMutation(Map<String, String> updates, String user, long id) {
+ this.updates = updates;
+ this.user = user;
+ this.id = id;
+ }
+
+ /**
+ * Get key-value configuration updates.
+ * @return map of configuration updates
+ */
+ public Map<String, String> getUpdates() {
+ return updates;
+ }
+
+ /**
+ * Get user who requested configuration change.
+ * @return user who requested configuration change
+ */
+ public String getUser() {
+ return user;
+ }
+
+ /**
+ * Get transaction id of this configuration change.
+ * @return transaction id
+ */
+ public long getId() {
+ return id;
+ }
+
+ /**
+ * Set transaction id of this configuration change.
+ * @param id transaction id
+ */
+ public void setId(long id) {
+ this.id = id;
+ }
+ }
+
+ /**
+ * Initialize the configuration store.
+ * @param conf configuration to initialize store with
+ * @param schedConf Initial key-value configuration to persist
+ */
+ void initialize(Configuration conf, Configuration schedConf);
+
+ /**
+ * Logs the configuration change to backing store. Generates an id associated
+ * with this mutation, sets it in {@code logMutation}, and returns it.
+ * @param logMutation configuration change to be persisted in write ahead log
+ * @return id which configuration store associates with this mutation
+ */
+ long logMutation(LogMutation logMutation);
+
+ /**
+ * Should be called after {@code logMutation}. Gets the pending mutation
+ * associated with {@code id} and marks the mutation as persisted (no longer
+ * pending). If isValid is true, merge the mutation with the persisted
+ * configuration.
+ *
+ * If {@code confirmMutation} is called with ids in a different order than
+ * was returned by {@code logMutation}, the result is implementation
+ * dependent.
+ * @param id id of mutation to be confirmed
+ * @param isValid if true, update persisted configuration with mutation
+ * associated with {@code id}.
+ * @return true on success
+ */
+ boolean confirmMutation(long id, boolean isValid);
+
+ /**
+ * Retrieve the persisted configuration.
+ * @return configuration as key-value
+ */
+ Configuration retrieve();
+
+ /**
+ * Get the list of pending mutations, in the order they were logged.
+ * @return list of mutations
+ */
+ List<LogMutation> getPendingMutations();
+
+ /**
+ * Get a list of confirmed configuration mutations starting from a given id.
+ * @param fromId id from which to start getting mutations, inclusive
+ * @return list of configuration mutations
+ */
+ List<LogMutation> getConfirmedConfHistory(long fromId);
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6023666b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
new file mode 100644
index 0000000..dff4e77
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+public class TestYarnConfigurationStore {
+
+ private YarnConfigurationStore confStore;
+ private Configuration schedConf;
+
+ private static final String testUser = "testUser";
+
+ @Before
+ public void setUp() {
+ schedConf = new Configuration(false);
+ schedConf.set("key1", "val1");
+ }
+
+ @Test
+ public void testInMemoryConfigurationStore() {
+ confStore = new InMemoryConfigurationStore();
+ confStore.initialize(new Configuration(), schedConf);
+ assertEquals("val1", confStore.retrieve().get("key1"));
+
+ Map<String, String> update1 = new HashMap<>();
+ update1.put("keyUpdate1", "valUpdate1");
+ LogMutation mutation1 = new LogMutation(update1, testUser);
+ long id = confStore.logMutation(mutation1);
+ assertEquals(1, confStore.getPendingMutations().size());
+ confStore.confirmMutation(id, true);
+ assertEquals("valUpdate1", confStore.retrieve().get("keyUpdate1"));
+ assertEquals(0, confStore.getPendingMutations().size());
+
+ Map<String, String> update2 = new HashMap<>();
+ update2.put("keyUpdate2", "valUpdate2");
+ LogMutation mutation2 = new LogMutation(update2, testUser);
+ id = confStore.logMutation(mutation2);
+ assertEquals(1, confStore.getPendingMutations().size());
+ confStore.confirmMutation(id, false);
+ assertNull("Configuration should not be updated",
+ confStore.retrieve().get("keyUpdate2"));
+ assertEquals(0, confStore.getPendingMutations().size());
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[02/20] hadoop git commit: Revert "MAPREDUCE-5875. Make Counter
limits consistent across JobClient, MRAppMaster,
and YarnChild. (Gera Shegalov via kasha)"
Posted by xg...@apache.org.
Revert "MAPREDUCE-5875. Make Counter limits consistent across JobClient, MRAppMaster, and YarnChild. (Gera Shegalov via kasha)"
This reverts commit e8a31f2e1c34514fba2f480e8db652f6e2ed65d8.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbb7d6bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbb7d6bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbb7d6bc
Branch: refs/heads/YARN-5734
Commit: fbb7d6bcbb887ce52ab1e9d5a1fed67a7f8a4be8
Parents: 3e23415
Author: Junping Du <ju...@apache.org>
Authored: Mon Jul 31 14:09:16 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Mon Jul 31 14:09:16 2017 -0700
----------------------------------------------------------------------
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 3 -
.../org/apache/hadoop/mapreduce/Cluster.java | 16 ++--
.../apache/hadoop/mapreduce/JobSubmitter.java | 2 -
.../hadoop/mapreduce/counters/Limits.java | 5 --
.../mapreduce/jobhistory/HistoryViewer.java | 16 ----
.../hadoop/mapreduce/v2/hs/CompletedJob.java | 15 ----
.../apache/hadoop/mapreduce/v2/TestMRJobs.java | 87 +++-----------------
7 files changed, 21 insertions(+), 123 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 1445481..8c9f605 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.EventReader;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
@@ -1281,8 +1280,6 @@ public class MRAppMaster extends CompositeService {
// finally set the job classloader
MRApps.setClassLoader(jobClassLoader, getConfig());
- // set job classloader if configured
- Limits.init(getConfig());
if (initFailed) {
JobEvent initFailedEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT_FAILED);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index fbf6806..4245daf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -213,15 +213,15 @@ public class Cluster {
public Job getJob(JobID jobId) throws IOException, InterruptedException {
JobStatus status = client.getJobStatus(jobId);
if (status != null) {
- final JobConf conf = new JobConf();
- final Path jobPath = new Path(client.getFilesystemName(),
- status.getJobFile());
- final FileSystem fs = FileSystem.get(jobPath.toUri(), getConf());
+ JobConf conf;
try {
- conf.addResource(fs.open(jobPath), jobPath.toString());
- } catch (FileNotFoundException fnf) {
- if (LOG.isWarnEnabled()) {
- LOG.warn("Job conf missing on cluster", fnf);
+ conf = new JobConf(status.getJobFile());
+ } catch (RuntimeException ex) {
+ // If job file doesn't exist it means we can't find the job
+ if (ex.getCause() instanceof FileNotFoundException) {
+ return null;
+ } else {
+ throw ex;
}
}
return Job.getInstance(this, status, conf);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 6ade376..e5ff26d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.mapred.QueueACL;
import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
-import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.security.TokenCache;
@@ -246,7 +245,6 @@ class JobSubmitter {
// Write job file to submit dir
writeConf(conf, submitJobFile);
- Limits.reset(conf);
//
// Now, actually submit the job (using the submit name)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
index 3821694..34b0fae 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java
@@ -123,9 +123,4 @@ public class Limits {
public synchronized LimitExceededException violation() {
return firstViolation;
}
-
- public static synchronized void reset(Configuration conf) {
- isInited = false;
- init(conf);
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index 25c0630..5f10fdf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.util.HashMap;
@@ -25,8 +24,6 @@ import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -36,7 +33,6 @@ import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.util.HostUtil;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@@ -49,7 +45,6 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HistoryViewer {
- private static final Log LOG = LogFactory.getLog(HistoryViewer.class);
private FileSystem fs;
private JobInfo job;
private HistoryViewerPrinter jhvp;
@@ -89,17 +84,6 @@ public class HistoryViewer {
System.err.println("Ignore unrecognized file: " + jobFile.getName());
throw new IOException(errorMsg);
}
- final Path jobConfPath = new Path(jobFile.getParent(), jobDetails[0]
- + "_" + jobDetails[1] + "_" + jobDetails[2] + "_conf.xml");
- final Configuration jobConf = new Configuration(conf);
- try {
- jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
- Limits.reset(conf);
- } catch (FileNotFoundException fnf) {
- if (LOG.isWarnEnabled()) {
- LOG.warn("Missing job conf in history", fnf);
- }
- }
JobHistoryParser parser = new JobHistoryParser(fs, jobFile);
job = parser.parse();
String scheme = WebAppUtils.getHttpSchemePrefix(fs.getConf());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index 4deb9ae..bbb126d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.mapreduce.v2.hs;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.ArrayList;
@@ -35,7 +34,6 @@ import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.TaskCompletionEvent;
@@ -43,7 +41,6 @@ import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
@@ -353,19 +350,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
if (historyFileAbsolute != null) {
JobHistoryParser parser = null;
try {
- final FileSystem fs = historyFileAbsolute.getFileSystem(conf);
parser = createJobHistoryParser(historyFileAbsolute);
- final Path jobConfPath = new Path(historyFileAbsolute.getParent(),
- JobHistoryUtils.getIntermediateConfFileName(jobId));
- final Configuration conf = new Configuration();
- try {
- conf.addResource(fs.open(jobConfPath), jobConfPath.toString());
- Limits.reset(conf);
- } catch (FileNotFoundException fnf) {
- if (LOG.isWarnEnabled()) {
- LOG.warn("Missing job conf in history", fnf);
- }
- }
this.jobInfo = parser.parse();
} catch (IOException e) {
throw new YarnRuntimeException("Could not load history file "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbb7d6bc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index 7a0c43e..c6d2168 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -55,14 +55,10 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
@@ -114,7 +110,6 @@ public class TestMRJobs {
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
private static final int NUM_NODE_MGRS = 3;
private static final String TEST_IO_SORT_MB = "11";
- private static final String TEST_GROUP_MAX = "200";
private static final int DEFAULT_REDUCES = 2;
protected int numSleepReducers = DEFAULT_REDUCES;
@@ -466,58 +461,31 @@ public class TestMRJobs {
}
@Test(timeout = 300000)
- public void testConfVerificationWithClassloader() throws Exception {
- testConfVerification(true, false, false, false);
- }
-
- @Test(timeout = 300000)
- public void testConfVerificationWithClassloaderCustomClasses()
- throws Exception {
- testConfVerification(true, true, false, false);
- }
-
- @Test(timeout = 300000)
- public void testConfVerificationWithOutClassloader() throws Exception {
- testConfVerification(false, false, false, false);
- }
-
- @Test(timeout = 300000)
- public void testConfVerificationWithJobClient() throws Exception {
- testConfVerification(false, false, true, false);
+ public void testJobClassloader() throws IOException, InterruptedException,
+ ClassNotFoundException {
+ testJobClassloader(false);
}
@Test(timeout = 300000)
- public void testConfVerificationWithJobClientLocal() throws Exception {
- testConfVerification(false, false, true, true);
+ public void testJobClassloaderWithCustomClasses() throws IOException,
+ InterruptedException, ClassNotFoundException {
+ testJobClassloader(true);
}
- private void testConfVerification(boolean useJobClassLoader,
- boolean useCustomClasses, boolean useJobClientForMonitring,
- boolean useLocal) throws Exception {
- LOG.info("\n\n\nStarting testConfVerification()"
- + " jobClassloader=" + useJobClassLoader
- + " customClasses=" + useCustomClasses
- + " jobClient=" + useJobClientForMonitring
- + " localMode=" + useLocal);
+ private void testJobClassloader(boolean useCustomClasses) throws IOException,
+ InterruptedException, ClassNotFoundException {
+ LOG.info("\n\n\nStarting testJobClassloader()"
+ + " useCustomClasses=" + useCustomClasses);
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
return;
}
- final Configuration clusterConfig;
- if (useLocal) {
- clusterConfig = new Configuration();
- conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
- } else {
- clusterConfig = mrCluster.getConfig();
- }
- final JobClient jc = new JobClient(clusterConfig);
- final Configuration sleepConf = new Configuration(clusterConfig);
+ final Configuration sleepConf = new Configuration(mrCluster.getConfig());
// set master address to local to test that local mode applied iff framework == local
sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
- sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER,
- useJobClassLoader);
+ sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
if (useCustomClasses) {
// to test AM loading user classes such as output format class, we want
// to blacklist them from the system classes (they need to be prepended
@@ -535,7 +503,6 @@ public class TestMRJobs {
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
- sleepConf.set(MRJobConfig.COUNTER_GROUPS_MAX_KEY, TEST_GROUP_MAX);
final SleepJob sleepJob = new SleepJob();
sleepJob.setConf(sleepConf);
final Job job = sleepJob.createJob(1, 1, 10, 1, 10, 1);
@@ -553,26 +520,7 @@ public class TestMRJobs {
jobConf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
}
job.submit();
- final boolean succeeded;
- if (useJobClientForMonitring && !useLocal) {
- // We can't use getJobID in useLocal case because JobClient and Job
- // point to different instances of LocalJobRunner
- //
- final JobID mapredJobID = JobID.downgrade(job.getJobID());
- RunningJob runningJob = null;
- do {
- Thread.sleep(10);
- runningJob = jc.getJob(mapredJobID);
- } while (runningJob == null);
- Assert.assertEquals("Unexpected RunningJob's "
- + MRJobConfig.COUNTER_GROUPS_MAX_KEY,
- TEST_GROUP_MAX, runningJob.getConfiguration()
- .get(MRJobConfig.COUNTER_GROUPS_MAX_KEY));
- runningJob.waitForCompletion();
- succeeded = runningJob.isSuccessful();
- } else {
- succeeded = job.waitForCompletion(true);
- }
+ boolean succeeded = job.waitForCompletion(true);
Assert.assertTrue("Job status: " + job.getStatus().getFailureInfo(),
succeeded);
}
@@ -1366,14 +1314,5 @@ public class TestMRJobs {
+ ", actual: " + ioSortMb);
}
}
-
- @Override
- public void map(IntWritable key, IntWritable value, Context context) throws IOException, InterruptedException {
- super.map(key, value, context);
- for (int i = 0; i < 100; i++) {
- context.getCounter("testCounterGroup-" + i,
- "testCounter").increment(1);
- }
- }
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[11/20] hadoop git commit: HADOOP-14245. Use Mockito.when instead of
Mockito.stub. Contributed by Andras Bokor.
Posted by xg...@apache.org.
HADOOP-14245. Use Mockito.when instead of Mockito.stub. Contributed by Andras Bokor.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b38a1eea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b38a1eea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b38a1eea
Branch: refs/heads/YARN-5734
Commit: b38a1eea8e2917989d83d169a7b5773163e6832e
Parents: ceacadc
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Aug 1 15:15:43 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Aug 1 15:15:43 2017 +0900
----------------------------------------------------------------------
.../org/apache/hadoop/TestGenericRefresh.java | 28 ++++++++++----------
.../util/TestCgroupsLCEResourcesHandler.java | 2 +-
2 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b38a1eea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
index 3c73c28..dcd91c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
@@ -72,16 +72,16 @@ public class TestGenericRefresh {
public void setUp() throws Exception {
// Register Handlers, first one just sends an ok response
firstHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toReturn(RefreshResponse.successResponse());
+ Mockito.when(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenReturn(RefreshResponse.successResponse());
RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler);
// Second handler has conditional response for testing args
secondHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
- .toReturn(new RefreshResponse(3, "three"));
- Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
- .toReturn(new RefreshResponse(2, "two"));
+ Mockito.when(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
+ .thenReturn(new RefreshResponse(3, "three"));
+ Mockito.when(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
+ .thenReturn(new RefreshResponse(2, "two"));
RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
}
@@ -181,12 +181,12 @@ public class TestGenericRefresh {
public void testMultipleReturnCodeMerging() throws Exception {
// Two handlers which return two non-zero values
RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
- Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toReturn(new RefreshResponse(23, "Twenty Three"));
+ Mockito.when(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenReturn(new RefreshResponse(23, "Twenty Three"));
RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
- Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toReturn(new RefreshResponse(10, "Ten"));
+ Mockito.when(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenReturn(new RefreshResponse(10, "Ten"));
// Then registered to the same ID
RefreshRegistry.defaultRegistry().register("shared", handlerOne);
@@ -210,12 +210,12 @@ public class TestGenericRefresh {
public void testExceptionResultsInNormalError() throws Exception {
// In this test, we ensure that all handlers are called even if we throw an exception in one
RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
+ Mockito.when(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenThrow(new RuntimeException("Exceptional Handler Throws Exception"));
RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toThrow(new RuntimeException("More Exceptions"));
+ Mockito.when(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenThrow(new RuntimeException("More Exceptions"));
RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b38a1eea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
index b562133..1ed8fd8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
@@ -76,7 +76,7 @@ public class TestCgroupsLCEResourcesHandler {
// Test 1, tasks file is empty
// tasks file has no data, should return true
- Mockito.stub(fspy.delete()).toReturn(true);
+ Mockito.when(fspy.delete()).thenReturn(true);
Assert.assertTrue(handler.checkAndDeleteCgroup(fspy));
// Test 2, tasks file has data
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[20/20] hadoop git commit: YARN-5947: Create
LeveldbConfigurationStore class using Leveldb as backing store. Contributed
by Jonathan Hung
Posted by xg...@apache.org.
YARN-5947: Create LeveldbConfigurationStore class using Leveldb as backing store. Contributed by Jonathan Hung
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3e2b6fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3e2b6fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3e2b6fd
Branch: refs/heads/YARN-5734
Commit: d3e2b6fd5b7701a7d5f2ac33b09c72f520da8d6e
Parents: 79701d9
Author: Xuan <xg...@apache.org>
Authored: Mon Jul 31 16:48:40 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:44 2017 -0700
----------------------------------------------------------------------
.../hadoop/yarn/conf/YarnConfiguration.java | 13 +
.../src/main/resources/yarn-default.xml | 29 ++
.../scheduler/MutableConfigurationProvider.java | 6 +
.../scheduler/capacity/CapacityScheduler.java | 3 +
.../conf/LeveldbConfigurationStore.java | 314 +++++++++++++++++++
.../conf/MutableCSConfigurationProvider.java | 38 ++-
.../capacity/conf/YarnConfigurationStore.java | 14 +-
.../conf/TestYarnConfigurationStore.java | 3 +-
8 files changed, 414 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 01db626..c3644cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -624,8 +624,21 @@ public class YarnConfiguration extends Configuration {
public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
YARN_PREFIX + "scheduler.configuration.store.class";
public static final String MEMORY_CONFIGURATION_STORE = "memory";
+ public static final String LEVELDB_CONFIGURATION_STORE = "leveldb";
public static final String DEFAULT_CONFIGURATION_STORE =
MEMORY_CONFIGURATION_STORE;
+ public static final String RM_SCHEDCONF_STORE_PATH = YARN_PREFIX
+ + "scheduler.configuration.leveldb-store.path";
+
+ public static final String RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS =
+ YARN_PREFIX
+ + "scheduler.configuration.leveldb-store.compaction-interval-secs";
+ public static final long
+ DEFAULT_RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS = 60 * 60 * 24L;
+
+ public static final String RM_SCHEDCONF_LEVELDB_MAX_LOGS =
+ YARN_PREFIX + "scheduler.configuration.leveldb-store.max-logs";
+ public static final int DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS = 1000;
public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a0bed5f..6de9ab6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3159,4 +3159,33 @@
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy</value>
</property>
+ <property>
+ <description>
+ The storage path for LevelDB implementation of configuration store,
+ when yarn.scheduler.configuration.store.class is configured to be
+ "leveldb".
+ </description>
+ <name>yarn.scheduler.configuration.leveldb-store.path</name>
+ <value>${hadoop.tmp.dir}/yarn/system/confstore</value>
+ </property>
+
+ <property>
+ <description>
+ The compaction interval for LevelDB configuration store in secs,
+ when yarn.scheduler.configuration.store.class is configured to be
+ "leveldb". Default is one day.
+ </description>
+ <name>yarn.scheduler.configuration.leveldb-store.compaction-interval-secs</name>
+ <value>86400</value>
+ </property>
+
+ <property>
+ <description>
+ The max number of configuration change log entries kept in LevelDB config
+ store, when yarn.scheduler.configuration.store.class is configured to be
+ "leveldb". Default is 1000.
+ </description>
+ <name>yarn.scheduler.configuration.leveldb-store.max-logs</name>
+ <value>1000</value>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 86be7c3..1f13467 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -29,6 +29,12 @@ import java.io.IOException;
public interface MutableConfigurationProvider {
/**
+ * Apply transactions which were not committed.
+ * @throws IOException if recovery fails
+ */
+ void recoverConf() throws IOException;
+
+ /**
* Update the scheduler configuration with the provided key value pairs.
* @param user User issuing the request
* @param confUpdate Key-value pairs for configurations to be updated.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 8a54013..1962e65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -386,6 +386,9 @@ public class CapacityScheduler extends
@Override
public void serviceStart() throws Exception {
startSchedulerThreads();
+ if (this.csConfProvider instanceof MutableConfigurationProvider) {
+ ((MutableConfigurationProvider) csConfProvider).recoverConf();
+ }
super.serviceStart();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
new file mode 100644
index 0000000..1534685
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
@@ -0,0 +1,314 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.fusesource.leveldbjni.JniDBFactory;
+import org.fusesource.leveldbjni.internal.NativeDB;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBComparator;
+import org.iq80.leveldb.DBException;
+import org.iq80.leveldb.DBIterator;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.WriteBatch;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+
+import static org.fusesource.leveldbjni.JniDBFactory.bytes;
+
+/**
+ * A LevelDB implementation of {@link YarnConfigurationStore}.
+ */
+public class LeveldbConfigurationStore implements YarnConfigurationStore {
+
+ public static final Log LOG =
+ LogFactory.getLog(LeveldbConfigurationStore.class);
+
+ private static final String DB_NAME = "yarn-conf-store";
+ private static final String LOG_PREFIX = "log.";
+ private static final String LOG_COMMITTED_TXN = "committedTxn";
+
+ private DB db;
+ private long txnId = 0;
+ private long minTxn = 0;
+ private long maxLogs;
+ private Configuration conf;
+ private LinkedList<LogMutation> pendingMutations = new LinkedList<>();
+ private Timer compactionTimer;
+ private long compactionIntervalMsec;
+
+ @Override
+ public void initialize(Configuration config, Configuration schedConf)
+ throws IOException {
+ this.conf = config;
+ try {
+ this.db = initDatabase(schedConf);
+ this.txnId = Long.parseLong(new String(db.get(bytes(LOG_COMMITTED_TXN)),
+ StandardCharsets.UTF_8));
+ DBIterator itr = db.iterator();
+ itr.seek(bytes(LOG_PREFIX + txnId));
+ // Seek to first uncommitted log
+ itr.next();
+ while (itr.hasNext()) {
+ Map.Entry<byte[], byte[]> entry = itr.next();
+ if (!new String(entry.getKey(), StandardCharsets.UTF_8)
+ .startsWith(LOG_PREFIX)) {
+ break;
+ }
+ pendingMutations.add(deserLogMutation(entry.getValue()));
+ }
+ // Get the earliest txnId stored in logs
+ itr.seekToFirst();
+ if (itr.hasNext()) {
+ Map.Entry<byte[], byte[]> entry = itr.next();
+ byte[] key = entry.getKey();
+ String logId = new String(key, StandardCharsets.UTF_8);
+ if (logId.startsWith(LOG_PREFIX)) {
+ minTxn = Long.parseLong(logId.substring(logId.indexOf('.') + 1));
+ }
+ }
+ this.maxLogs = config.getLong(
+ YarnConfiguration.RM_SCHEDCONF_LEVELDB_MAX_LOGS,
+ YarnConfiguration.DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS);
+ this.compactionIntervalMsec = config.getLong(
+ YarnConfiguration.RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS,
+ YarnConfiguration
+ .DEFAULT_RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS) * 1000;
+ startCompactionTimer();
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+
+ private DB initDatabase(Configuration config) throws Exception {
+ Path storeRoot = createStorageDir();
+ Options options = new Options();
+ options.createIfMissing(false);
+ options.comparator(new DBComparator() {
+ @Override
+ public int compare(byte[] key1, byte[] key2) {
+ String key1Str = new String(key1, StandardCharsets.UTF_8);
+ String key2Str = new String(key2, StandardCharsets.UTF_8);
+ int key1Txn = Integer.MAX_VALUE;
+ int key2Txn = Integer.MAX_VALUE;
+ if (key1Str.startsWith(LOG_PREFIX)) {
+ key1Txn = Integer.parseInt(key1Str.substring(
+ key1Str.indexOf('.') + 1));
+ }
+ if (key2Str.startsWith(LOG_PREFIX)) {
+ key2Txn = Integer.parseInt(key2Str.substring(
+ key2Str.indexOf('.') + 1));
+ }
+ // TODO txnId could overflow, in theory
+ if (key1Txn == Integer.MAX_VALUE && key2Txn == Integer.MAX_VALUE) {
+ if (key1Str.equals(key2Str) && key1Str.equals(LOG_COMMITTED_TXN)) {
+ return 0;
+ } else if (key1Str.equals(LOG_COMMITTED_TXN)) {
+ return -1;
+ } else if (key2Str.equals(LOG_COMMITTED_TXN)) {
+ return 1;
+ }
+ return key1Str.compareTo(key2Str);
+ }
+ return key1Txn - key2Txn;
+ }
+
+ @Override
+ public String name() {
+ return "logComparator";
+ }
+
+ public byte[] findShortestSeparator(byte[] start, byte[] limit) {
+ return start;
+ }
+
+ public byte[] findShortSuccessor(byte[] key) {
+ return key;
+ }
+ });
+ LOG.info("Using conf database at " + storeRoot);
+ File dbfile = new File(storeRoot.toString());
+ try {
+ db = JniDBFactory.factory.open(dbfile, options);
+ } catch (NativeDB.DBException e) {
+ if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
+ LOG.info("Creating conf database at " + dbfile);
+ options.createIfMissing(true);
+ try {
+ db = JniDBFactory.factory.open(dbfile, options);
+ // Write the initial scheduler configuration
+ WriteBatch initBatch = db.createWriteBatch();
+ for (Map.Entry<String, String> kv : config) {
+ initBatch.put(bytes(kv.getKey()), bytes(kv.getValue()));
+ }
+ initBatch.put(bytes(LOG_COMMITTED_TXN), bytes("0"));
+ db.write(initBatch);
+ } catch (DBException dbErr) {
+ throw new IOException(dbErr.getMessage(), dbErr);
+ }
+ } else {
+ throw e;
+ }
+ }
+ return db;
+ }
+
+ private Path createStorageDir() throws IOException {
+ Path root = getStorageDir();
+ FileSystem fs = FileSystem.getLocal(conf);
+ fs.mkdirs(root, new FsPermission((short) 0700));
+ return root;
+ }
+
+ private Path getStorageDir() throws IOException {
+ String storePath = conf.get(YarnConfiguration.RM_SCHEDCONF_STORE_PATH);
+ if (storePath == null) {
+ throw new IOException("No store location directory configured in " +
+ YarnConfiguration.RM_SCHEDCONF_STORE_PATH);
+ }
+ return new Path(storePath, DB_NAME);
+ }
+
+ @Override
+ public synchronized long logMutation(LogMutation logMutation)
+ throws IOException {
+ logMutation.setId(++txnId);
+ WriteBatch logBatch = db.createWriteBatch();
+ logBatch.put(bytes(LOG_PREFIX + txnId), serLogMutation(logMutation));
+ if (txnId - minTxn >= maxLogs) {
+ logBatch.delete(bytes(LOG_PREFIX + minTxn));
+ minTxn++;
+ }
+ db.write(logBatch);
+ pendingMutations.add(logMutation);
+ return txnId;
+ }
+
+ @Override
+ public synchronized boolean confirmMutation(long id, boolean isValid)
+ throws IOException {
+ WriteBatch updateBatch = db.createWriteBatch();
+ if (isValid) {
+ LogMutation mutation = deserLogMutation(db.get(bytes(LOG_PREFIX + id)));
+ for (Map.Entry<String, String> changes :
+ mutation.getUpdates().entrySet()) {
+ if (changes.getValue() == null || changes.getValue().isEmpty()) {
+ updateBatch.delete(bytes(changes.getKey()));
+ } else {
+ updateBatch.put(bytes(changes.getKey()), bytes(changes.getValue()));
+ }
+ }
+ }
+ updateBatch.put(bytes(LOG_COMMITTED_TXN), bytes(String.valueOf(id)));
+ db.write(updateBatch);
+ // Assumes logMutation and confirmMutation are done in the same
+ // synchronized method. For example,
+ // {@link MutableCSConfigurationProvider#mutateConfiguration(
+ // UserGroupInformation user, SchedConfUpdateInfo confUpdate)}
+ pendingMutations.removeFirst();
+ return true;
+ }
+
+ private byte[] serLogMutation(LogMutation mutation) throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try (ObjectOutput oos = new ObjectOutputStream(baos)) {
+ oos.writeObject(mutation);
+ oos.flush();
+ return baos.toByteArray();
+ }
+ }
+ private LogMutation deserLogMutation(byte[] mutation) throws IOException {
+ try (ObjectInput input = new ObjectInputStream(
+ new ByteArrayInputStream(mutation))) {
+ return (LogMutation) input.readObject();
+ } catch (ClassNotFoundException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public synchronized Configuration retrieve() {
+ DBIterator itr = db.iterator();
+ itr.seek(bytes(LOG_COMMITTED_TXN));
+ Configuration config = new Configuration(false);
+ itr.next();
+ while (itr.hasNext()) {
+ Map.Entry<byte[], byte[]> entry = itr.next();
+ config.set(new String(entry.getKey(), StandardCharsets.UTF_8),
+ new String(entry.getValue(), StandardCharsets.UTF_8));
+ }
+ return config;
+ }
+
+ @Override
+ public List<LogMutation> getPendingMutations() {
+ return pendingMutations;
+ }
+
+ @Override
+ public List<LogMutation> getConfirmedConfHistory(long fromId) {
+ return null; // unimplemented
+ }
+
+ // TODO below was taken from LeveldbRMStateStore, it can probably be
+ // refactored
+ private void startCompactionTimer() {
+ if (compactionIntervalMsec > 0) {
+ compactionTimer = new Timer(
+ this.getClass().getSimpleName() + " compaction timer", true);
+ compactionTimer.schedule(new CompactionTimerTask(),
+ compactionIntervalMsec, compactionIntervalMsec);
+ }
+ }
+
+ private class CompactionTimerTask extends TimerTask {
+ @Override
+ public void run() {
+ long start = Time.monotonicNow();
+ LOG.info("Starting full compaction cycle");
+ try {
+ db.compactRange(null, null);
+ } catch (DBException e) {
+ LOG.error("Error compacting database", e);
+ }
+ long duration = Time.monotonicNow() - start;
+ LOG.info("Full compaction cycle completed in " + duration + " msec");
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 670c0f9..9ccc146 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -19,6 +19,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import com.google.common.base.Joiner;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@@ -49,6 +51,9 @@ import java.util.Map;
public class MutableCSConfigurationProvider implements CSConfigurationProvider,
MutableConfigurationProvider {
+ public static final Log LOG =
+ LogFactory.getLog(MutableCSConfigurationProvider.class);
+
private Configuration schedConf;
private YarnConfigurationStore confStore;
private ConfigurationMutationACLPolicy aclMutationPolicy;
@@ -68,6 +73,9 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
case YarnConfiguration.MEMORY_CONFIGURATION_STORE:
this.confStore = new InMemoryConfigurationStore();
break;
+ case YarnConfiguration.LEVELDB_CONFIGURATION_STORE:
+ this.confStore = new LeveldbConfigurationStore();
+ break;
default:
this.confStore = YarnConfigurationStoreFactory.getStore(config);
break;
@@ -82,6 +90,9 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
schedConf.set(kv.getKey(), kv.getValue());
}
confStore.initialize(config, schedConf);
+ // After initializing confStore, the store may already have an existing
+ // configuration. Use this one.
+ schedConf = confStore.retrieve();
this.aclMutationPolicy = ConfigurationMutationACLPolicyFactory
.getPolicy(config);
aclMutationPolicy.init(config, rmContext);
@@ -97,7 +108,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
}
@Override
- public void mutateConfiguration(UserGroupInformation user,
+ public synchronized void mutateConfiguration(UserGroupInformation user,
SchedConfUpdateInfo confUpdate) throws IOException {
if (!aclMutationPolicy.isMutationAllowed(user, confUpdate)) {
throw new AccessControlException("User is not admin of all modified" +
@@ -124,6 +135,31 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
confStore.confirmMutation(id, true);
}
+ @Override
+ public void recoverConf() throws IOException {
+ List<LogMutation> uncommittedLogs = confStore.getPendingMutations();
+ Configuration oldConf = new Configuration(schedConf);
+ for (LogMutation mutation : uncommittedLogs) {
+ for (Map.Entry<String, String> kv : mutation.getUpdates().entrySet()) {
+ if (kv.getValue() == null) {
+ schedConf.unset(kv.getKey());
+ } else {
+ schedConf.set(kv.getKey(), kv.getValue());
+ }
+ }
+ try {
+ rmContext.getScheduler().reinitialize(conf, rmContext);
+ } catch (IOException e) {
+ schedConf = oldConf;
+ confStore.confirmMutation(mutation.getId(), false);
+ LOG.info("Configuration mutation " + mutation.getId()
+ + " was rejected", e);
+ continue;
+ }
+ confStore.confirmMutation(mutation.getId(), true);
+ LOG.info("Configuration mutation " + mutation.getId()+ " was accepted");
+ }
+ }
private Map<String, String> constructKeyValueConfUpdate(
SchedConfUpdateInfo mutationInfo) throws IOException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
index 22c0ef8..065c877 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import java.io.IOException;
+import java.io.Serializable;
import java.util.List;
import java.util.Map;
@@ -43,7 +45,7 @@ public interface YarnConfigurationStore {
* LogMutation encapsulates the fields needed for configuration mutation
* audit logging and recovery.
*/
- class LogMutation {
+ class LogMutation implements Serializable {
private Map<String, String> updates;
private String user;
private long id;
@@ -106,16 +108,19 @@ public interface YarnConfigurationStore {
* Initialize the configuration store.
* @param conf configuration to initialize store with
* @param schedConf Initial key-value configuration to persist
+ * @throws IOException if initialization fails
*/
- void initialize(Configuration conf, Configuration schedConf);
+ void initialize(Configuration conf, Configuration schedConf)
+ throws IOException;
/**
* Logs the configuration change to backing store. Generates an id associated
* with this mutation, sets it in {@code logMutation}, and returns it.
* @param logMutation configuration change to be persisted in write ahead log
* @return id which configuration store associates with this mutation
+ * @throws IOException if logging fails
*/
- long logMutation(LogMutation logMutation);
+ long logMutation(LogMutation logMutation) throws IOException;
/**
* Should be called after {@code logMutation}. Gets the pending mutation
@@ -130,8 +135,9 @@ public interface YarnConfigurationStore {
* @param isValid if true, update persisted configuration with mutation
* associated with {@code id}.
* @return true on success
+ * @throws IOException if mutation confirmation fails
*/
- boolean confirmMutation(long id, boolean isValid);
+ boolean confirmMutation(long id, boolean isValid) throws IOException;
/**
* Retrieve the persisted configuration.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3e2b6fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
index dff4e77..631ce65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestYarnConfigurationStore.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.Yar
import org.junit.Before;
import org.junit.Test;
+import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -43,7 +44,7 @@ public class TestYarnConfigurationStore {
}
@Test
- public void testInMemoryConfigurationStore() {
+ public void testInMemoryConfigurationStore() throws IOException {
confStore = new InMemoryConfigurationStore();
confStore.initialize(new Configuration(), schedConf);
assertEquals("val1", confStore.retrieve().get("key1"));
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[15/20] hadoop git commit: YARN-5952. Create REST API for changing
YARN scheduler configurations. (Jonathan Hung via wangda)
Posted by xg...@apache.org.
YARN-5952. Create REST API for changing YARN scheduler configurations. (Jonathan Hung via wangda)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93c17472
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93c17472
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93c17472
Branch: refs/heads/YARN-5734
Commit: 93c17472ee46924c2c4e8df8400a5625166dee7f
Parents: fc19c35
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Apr 3 10:12:01 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:38 2017 -0700
----------------------------------------------------------------------
.../scheduler/MutableConfScheduler.java | 40 ++
.../scheduler/MutableConfigurationProvider.java | 5 +-
.../scheduler/capacity/CapacityScheduler.java | 16 +-
.../conf/InMemoryConfigurationStore.java | 6 +-
.../conf/MutableCSConfigurationProvider.java | 24 +-
.../resourcemanager/webapp/RMWebServices.java | 172 ++++++-
.../webapp/dao/QueueConfigInfo.java | 57 +++
.../webapp/dao/QueueConfigsUpdateInfo.java | 60 +++
.../TestMutableCSConfigurationProvider.java | 6 +-
.../TestRMWebServicesConfigurationMutation.java | 477 +++++++++++++++++++
10 files changed, 851 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
new file mode 100644
index 0000000..35e36e1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import java.io.IOException;
+import java.util.Map;
+
/**
 * Interface for a scheduler that supports changing its configuration at
 * runtime.
 */
public interface MutableConfScheduler extends ResourceScheduler {

  /**
   * Update the scheduler's configuration with the given key-value pairs.
   * @param user Caller of this update
   * @param confUpdate key-value map of the configuration update
   * @throws IOException if the update is invalid or cannot be applied
   */
  void updateConfiguration(UserGroupInformation user,
      Map<String, String> confUpdate) throws IOException;

}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index da30a2b..889c3bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+import java.io.IOException;
import java.util.Map;
/**
@@ -29,7 +30,9 @@ public interface MutableConfigurationProvider {
* Update the scheduler configuration with the provided key value pairs.
* @param user User issuing the request
* @param confUpdate Key-value pairs for configurations to be updated.
+ * @throws IOException if scheduler could not be reinitialized
*/
- void mutateConfiguration(String user, Map<String, String> confUpdate);
+ void mutateConfiguration(String user, Map<String, String> confUpdate)
+ throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index ca6e872..ac1748a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -86,6 +86,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnSched
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
@@ -150,7 +152,7 @@ import com.google.common.util.concurrent.SettableFuture;
public class CapacityScheduler extends
AbstractYarnScheduler<FiCaSchedulerApp, FiCaSchedulerNode> implements
PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
- ResourceAllocationCommitter {
+ ResourceAllocationCommitter, MutableConfScheduler {
private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
@@ -2512,4 +2514,16 @@ public class CapacityScheduler extends
writeLock.unlock();
}
}
+
+ @Override
+ public void updateConfiguration(UserGroupInformation user,
+ Map<String, String> confUpdate) throws IOException {
+ if (csConfProvider instanceof MutableConfigurationProvider) {
+ ((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
+ user.getShortUserName(), confUpdate);
+ } else {
+ throw new UnsupportedOperationException("Configured CS configuration " +
+ "provider does not support updating configuration.");
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
index a208fb9..b97be1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -58,7 +58,11 @@ public class InMemoryConfigurationStore implements YarnConfigurationStore {
if (isValid) {
Map<String, String> mutations = mutation.getUpdates();
for (Map.Entry<String, String> kv : mutations.entrySet()) {
- schedConf.set(kv.getKey(), kv.getValue());
+ if (kv.getValue() == null) {
+ schedConf.unset(kv.getKey());
+ } else {
+ schedConf.set(kv.getKey(), kv.getValue());
+ }
}
}
return true;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 267ab6a..ea1b3c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -60,34 +60,44 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
}
Configuration initialSchedConf = new Configuration(false);
initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
- this.schedConf = initialSchedConf;
- confStore.initialize(config, initialSchedConf);
+ this.schedConf = new Configuration(false);
+ // We need to explicitly set the key-values in schedConf, otherwise
+ // these configuration keys cannot be deleted when
+ // configuration is reloaded.
+ for (Map.Entry<String, String> kv : initialSchedConf) {
+ schedConf.set(kv.getKey(), kv.getValue());
+ }
+ confStore.initialize(config, schedConf);
this.conf = config;
}
@Override
public CapacitySchedulerConfiguration loadConfiguration(Configuration
configuration) throws IOException {
- Configuration loadedConf = new Configuration(configuration);
- loadedConf.addResource(schedConf);
+ Configuration loadedConf = new Configuration(schedConf);
+ loadedConf.addResource(configuration);
return new CapacitySchedulerConfiguration(loadedConf, false);
}
@Override
public void mutateConfiguration(String user,
- Map<String, String> confUpdate) {
+ Map<String, String> confUpdate) throws IOException {
Configuration oldConf = new Configuration(schedConf);
LogMutation log = new LogMutation(confUpdate, user);
long id = confStore.logMutation(log);
for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
- schedConf.set(kv.getKey(), kv.getValue());
+ if (kv.getValue() == null) {
+ schedConf.unset(kv.getKey());
+ } else {
+ schedConf.set(kv.getKey(), kv.getValue());
+ }
}
try {
rmContext.getScheduler().reinitialize(conf, rmContext);
} catch (IOException e) {
schedConf = oldConf;
confStore.confirmMutation(id, false);
- return;
+ throw e;
}
confStore.confirmMutation(id, true);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index c537b7e..56a0bf8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -55,7 +55,8 @@ import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
-
+import com.google.common.base.Joiner;
+import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -127,11 +128,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@@ -2404,4 +2408,170 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
app.getApplicationTimeouts().get(appTimeout.getTimeoutType()));
return Response.status(Status.OK).entity(timeout).build();
}
+
+ @PUT
+ @Path("/queues")
+ @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+ MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+ @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+ public Response updateSchedulerConfiguration(QueueConfigsUpdateInfo
+ mutationInfo, @Context HttpServletRequest hsr)
+ throws AuthorizationException, InterruptedException {
+ init();
+
+ UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+ ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
+ if (aclsManager.areACLsEnabled()) {
+ if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
+ String msg = "Only admins can carry out this operation.";
+ throw new ForbiddenException(msg);
+ }
+ }
+
+ ResourceScheduler scheduler = rm.getResourceScheduler();
+ if (scheduler instanceof MutableConfScheduler) {
+ try {
+ callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws IOException, YarnException {
+ Map<String, String> confUpdate =
+ constructKeyValueConfUpdate(mutationInfo);
+ ((CapacityScheduler) scheduler).updateConfiguration(callerUGI,
+ confUpdate);
+ return null;
+ }
+ });
+ } catch (IOException e) {
+ return Response.status(Status.BAD_REQUEST).entity(e.getMessage())
+ .build();
+ }
+ return Response.status(Status.OK).entity("Configuration change " +
+ "successfully applied.").build();
+ } else {
+ return Response.status(Status.BAD_REQUEST)
+ .entity("Configuration change only supported by CapacityScheduler.")
+ .build();
+ }
+ }
+
+ private Map<String, String> constructKeyValueConfUpdate(
+ QueueConfigsUpdateInfo mutationInfo) throws IOException {
+ CapacitySchedulerConfiguration currentConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ CapacitySchedulerConfiguration proposedConf =
+ new CapacitySchedulerConfiguration(currentConf, false);
+ Map<String, String> confUpdate = new HashMap<>();
+ for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
+ removeQueue(queueToRemove, proposedConf, confUpdate);
+ }
+ for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
+ addQueue(addQueueInfo, proposedConf, confUpdate);
+ }
+ for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
+ updateQueue(updateQueueInfo, proposedConf, confUpdate);
+ }
+ return confUpdate;
+ }
+
+ private void removeQueue(
+ String queueToRemove, CapacitySchedulerConfiguration proposedConf,
+ Map<String, String> confUpdate) throws IOException {
+ if (queueToRemove == null) {
+ return;
+ } else {
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+ String queueName = queueToRemove.substring(
+ queueToRemove.lastIndexOf('.') + 1);
+ CSQueue queue = cs.getQueue(queueName);
+ if (queue == null ||
+ !queue.getQueuePath().equals(queueToRemove)) {
+ throw new IOException("Queue " + queueToRemove + " not found");
+ } else if (queueToRemove.lastIndexOf('.') == -1) {
+ throw new IOException("Can't remove queue " + queueToRemove);
+ }
+ String parentQueuePath = queueToRemove.substring(0, queueToRemove
+ .lastIndexOf('.'));
+ String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
+ List<String> newSiblingQueues = new ArrayList<>();
+ for (String siblingQueue : siblingQueues) {
+ if (!siblingQueue.equals(queueName)) {
+ newSiblingQueues.add(siblingQueue);
+ }
+ }
+ proposedConf.setQueues(parentQueuePath, newSiblingQueues
+ .toArray(new String[0]));
+ String queuesConfig = CapacitySchedulerConfiguration.PREFIX +
+ parentQueuePath + CapacitySchedulerConfiguration.DOT +
+ CapacitySchedulerConfiguration.QUEUES;
+ if (newSiblingQueues.size() == 0) {
+ confUpdate.put(queuesConfig, null);
+ } else {
+ confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
+ }
+ for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
+ ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
+ .entrySet()) {
+ proposedConf.unset(confRemove.getKey());
+ confUpdate.put(confRemove.getKey(), null);
+ }
+ }
+ }
+
+ private void addQueue(
+ QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
+ Map<String, String> confUpdate) throws IOException {
+ if (addInfo == null) {
+ return;
+ } else {
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+ String queuePath = addInfo.getQueue();
+ String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
+ if (cs.getQueue(queueName) != null) {
+ throw new IOException("Can't add existing queue " + queuePath);
+ } else if (queuePath.lastIndexOf('.') == -1) {
+ throw new IOException("Can't add invalid queue " + queuePath);
+ }
+ String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
+ String[] siblings = proposedConf.getQueues(parentQueue);
+ List<String> siblingQueues = siblings == null ? new ArrayList<>() :
+ new ArrayList<>(Arrays.<String>asList(siblings));
+ siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
+ proposedConf.setQueues(parentQueue,
+ siblingQueues.toArray(new String[0]));
+ confUpdate.put(CapacitySchedulerConfiguration.PREFIX +
+ parentQueue + CapacitySchedulerConfiguration.DOT +
+ CapacitySchedulerConfiguration.QUEUES,
+ Joiner.on(',').join(siblingQueues));
+ String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
+ queuePath + CapacitySchedulerConfiguration.DOT;
+ for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
+ if (kv.getValue() == null) {
+ proposedConf.unset(keyPrefix + kv.getKey());
+ } else {
+ proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ }
+ }
+
+ private void updateQueue(QueueConfigInfo updateInfo,
+ CapacitySchedulerConfiguration proposedConf,
+ Map<String, String> confUpdate) {
+ if (updateInfo == null) {
+ return;
+ } else {
+ String queuePath = updateInfo.getQueue();
+ String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
+ queuePath + CapacitySchedulerConfiguration.DOT;
+ for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
+ if (kv.getValue() == null) {
+ proposedConf.unset(keyPrefix + kv.getKey());
+ } else {
+ proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
new file mode 100644
index 0000000..b20eda6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for adding or updating a queue to scheduler configuration
+ * for this queue.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigInfo {
+
+ @XmlElement(name = "queueName")
+ private String queue;
+
+ private HashMap<String, String> params = new HashMap<>();
+
+ public QueueConfigInfo() { }
+
+ public QueueConfigInfo(String queue, Map<String, String> params) {
+ this.queue = queue;
+ this.params = new HashMap<>(params);
+ }
+
+ public String getQueue() {
+ return this.queue;
+ }
+
+ public HashMap<String, String> getParams() {
+ return this.params;
+ }
+
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
new file mode 100644
index 0000000..644ec90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for making scheduler configuration changes (supports adding,
+ * removing, or updating a queue).
+ */
+@XmlRootElement(name = "schedConf")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigsUpdateInfo {
+
+ @XmlElement(name = "add")
+ private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
+
+ @XmlElement(name = "remove")
+ private ArrayList<String> removeQueueInfo = new ArrayList<>();
+
+ @XmlElement(name = "update")
+ private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
+
+ public QueueConfigsUpdateInfo() {
+ // JAXB needs this
+ }
+
+ public ArrayList<QueueConfigInfo> getAddQueueInfo() {
+ return addQueueInfo;
+ }
+
+ public ArrayList<String> getRemoveQueueInfo() {
+ return removeQueueInfo;
+ }
+
+ public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
+ return updateQueueInfo;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 3f103b1..254da31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -77,7 +77,11 @@ public class TestMutableCSConfigurationProvider {
assertNull(confProvider.loadConfiguration(conf).get("badKey"));
doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
any(RMContext.class));
- confProvider.mutateConfiguration(TEST_USER, badUpdate);
+ try {
+ confProvider.mutateConfiguration(TEST_USER, badUpdate);
+ } catch (IOException e) {
+ // Expected exception.
+ }
assertNull(confProvider.loadConfiguration(conf).get("badKey"));
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/93c17472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
new file mode 100644
index 0000000..d149055
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -0,0 +1,477 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response.Status;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Test scheduler configuration mutation via REST API.
+ */
+public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+
+ private static final File CONF_FILE = new File(new File("target",
+ "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
+ private static final File OLD_CONF_FILE = new File(new File("target",
+ "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE + ".tmp");
+
+ private static MockRM rm;
+ private static String userName;
+ private static CapacitySchedulerConfiguration csConf;
+ private static YarnConfiguration conf;
+
+ private static class WebServletModule extends ServletModule {
+ @Override
+ protected void configureServlets() {
+ bind(JAXBContextResolver.class);
+ bind(RMWebServices.class);
+ bind(GenericExceptionHandler.class);
+ try {
+ userName = UserGroupInformation.getCurrentUser().getShortUserName();
+ } catch (IOException ioe) {
+ throw new RuntimeException("Unable to get current user name "
+ + ioe.getMessage(), ioe);
+ }
+ csConf = new CapacitySchedulerConfiguration(new Configuration(false),
+ false);
+ setupQueueConfiguration(csConf);
+ conf = new YarnConfiguration();
+ conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ ResourceScheduler.class);
+ conf.set(CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+ CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER);
+ conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName);
+ try {
+ if (CONF_FILE.exists()) {
+ if (!CONF_FILE.renameTo(OLD_CONF_FILE)) {
+ throw new RuntimeException("Failed to rename conf file");
+ }
+ }
+ FileOutputStream out = new FileOutputStream(CONF_FILE);
+ csConf.writeXml(out);
+ out.close();
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to write XML file", e);
+ }
+ rm = new MockRM(conf);
+ bind(ResourceManager.class).toInstance(rm);
+ serve("/*").with(GuiceContainer.class);
+ filter("/*").through(TestRMWebServicesAppsModification
+ .TestRMCustomAuthFilter.class);
+ }
+ }
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ GuiceServletConfig.setInjector(
+ Guice.createInjector(new WebServletModule()));
+ }
+
+ private static void setupQueueConfiguration(
+ CapacitySchedulerConfiguration config) {
+ config.setQueues(CapacitySchedulerConfiguration.ROOT,
+ new String[]{"a", "b", "c"});
+
+ final String a = CapacitySchedulerConfiguration.ROOT + ".a";
+ config.setCapacity(a, 25f);
+ config.setMaximumCapacity(a, 50f);
+
+ final String a1 = a + ".a1";
+ final String a2 = a + ".a2";
+ config.setQueues(a, new String[]{"a1", "a2"});
+ config.setCapacity(a1, 100f);
+ config.setCapacity(a2, 0f);
+
+ final String b = CapacitySchedulerConfiguration.ROOT + ".b";
+ config.setCapacity(b, 75f);
+
+ final String c = CapacitySchedulerConfiguration.ROOT + ".c";
+ config.setCapacity(c, 0f);
+
+ final String c1 = c + ".c1";
+ config.setQueues(c, new String[] {"c1"});
+ config.setCapacity(c1, 0f);
+ }
+
+ public TestRMWebServicesConfigurationMutation() {
+ super(new WebAppDescriptor.Builder(
+ "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+ .contextListenerClass(GuiceServletConfig.class)
+ .filterClass(com.google.inject.servlet.GuiceFilter.class)
+ .contextPath("jersey-guice-filter").servletPath("/").build());
+ }
+
+ @Test
+ public void testAddNestedQueue() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Add parent queue root.d with two children d1 and d2.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ Map<String, String> d1Capacity = new HashMap<>();
+ d1Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
+ d1Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "25");
+ Map<String, String> nearEmptyCapacity = new HashMap<>();
+ nearEmptyCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "1E-4");
+ nearEmptyCapacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY,
+ "1E-4");
+ Map<String, String> d2Capacity = new HashMap<>();
+ d2Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "75");
+ d2Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "75");
+ QueueConfigInfo d1 = new QueueConfigInfo("root.d.d1", d1Capacity);
+ QueueConfigInfo d2 = new QueueConfigInfo("root.d.d2", d2Capacity);
+ QueueConfigInfo d = new QueueConfigInfo("root.d", nearEmptyCapacity);
+ updateInfo.getAddQueueInfo().add(d1);
+ updateInfo.getAddQueueInfo().add(d2);
+ updateInfo.getAddQueueInfo().add(d);
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(4, newCSConf.getQueues("root").length);
+ assertEquals(2, newCSConf.getQueues("root.d").length);
+ assertEquals(25.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d1"),
+ 0.01f);
+ assertEquals(75.0f, newCSConf.getNonLabeledQueueCapacity("root.d.d2"),
+ 0.01f);
+ }
+
+ @Test
+ public void testAddWithUpdate() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Add root.d with capacity 25, reducing root.b capacity from 75 to 50.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ Map<String, String> dCapacity = new HashMap<>();
+ dCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
+ Map<String, String> bCapacity = new HashMap<>();
+ bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "50");
+ QueueConfigInfo d = new QueueConfigInfo("root.d", dCapacity);
+ QueueConfigInfo b = new QueueConfigInfo("root.b", bCapacity);
+ updateInfo.getAddQueueInfo().add(d);
+ updateInfo.getUpdateQueueInfo().add(b);
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(4, newCSConf.getQueues("root").length);
+ assertEquals(25.0f, newCSConf.getNonLabeledQueueCapacity("root.d"), 0.01f);
+ assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
+ }
+
+ @Test
+ public void testRemoveQueue() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ stopQueue("root.a.a2");
+ // Remove root.a.a2
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ updateInfo.getRemoveQueueInfo().add("root.a.a2");
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(1, newCSConf.getQueues("root.a").length);
+ assertEquals("a1", newCSConf.getQueues("root.a")[0]);
+ }
+
+ @Test
+ public void testRemoveParentQueue() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ stopQueue("root.c", "root.c.c1");
+ // Remove root.c (parent queue)
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ updateInfo.getRemoveQueueInfo().add("root.c");
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(2, newCSConf.getQueues("root").length);
+ assertNull(newCSConf.getQueues("root.c"));
+ }
+
+ @Test
+ public void testRemoveParentQueueWithCapacity() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ stopQueue("root.a", "root.a.a1", "root.a.a2");
+ // Remove root.a (parent queue) with capacity 25
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ updateInfo.getRemoveQueueInfo().add("root.a");
+
+ // Set root.b capacity to 100
+ Map<String, String> bCapacity = new HashMap<>();
+ bCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "100");
+ QueueConfigInfo b = new QueueConfigInfo("root.b", bCapacity);
+ updateInfo.getUpdateQueueInfo().add(b);
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(2, newCSConf.getQueues("root").length);
+ assertEquals(100.0f, newCSConf.getNonLabeledQueueCapacity("root.b"),
+ 0.01f);
+ }
+
+ /**
+ * Removes two queues (root.b and root.c, with root.c's child stopped too) in
+ * a single REST request, while raising root.a to capacity 100, then verifies
+ * only one child of root remains.
+ */
+ @Test
+ public void testRemoveMultipleQueues() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Stop every queue slated for removal, including root.c's child leaf.
+ stopQueue("root.b", "root.c", "root.c.c1");
+ // Remove root.b and root.c
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ updateInfo.getRemoveQueueInfo().add("root.b");
+ updateInfo.getRemoveQueueInfo().add("root.c");
+ // Give root.a the full capacity so root's children still total 100.
+ Map<String, String> aCapacity = new HashMap<>();
+ aCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "100");
+ aCapacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "100");
+ QueueConfigInfo configInfo = new QueueConfigInfo("root.a", aCapacity);
+ updateInfo.getUpdateQueueInfo().add(configInfo);
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ // Only root.a should remain under root after both removals.
+ assertEquals(1, newCSConf.getQueues("root").length);
+ }
+
+ /**
+ * Transitions the given queues to the STOPPED state through the same
+ * /ws/v1/cluster/queues PUT endpoint the tests exercise, and asserts the
+ * new scheduler configuration reports each queue as STOPPED.
+ * NOTE(review): callers invoke this before removing queues — presumably the
+ * scheduler rejects removal of running queues; confirm.
+ *
+ * @param queuePaths full queue paths (e.g. "root.a.a1") to stop
+ */
+ private void stopQueue(String... queuePaths) throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Set state of queues to STOPPED.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ Map<String, String> stoppedParam = new HashMap<>();
+ stoppedParam.put(CapacitySchedulerConfiguration.STATE,
+ QueueState.STOPPED.toString());
+ // The same param map is shared by every queue's update entry.
+ for (String queue : queuePaths) {
+ QueueConfigInfo stoppedInfo = new QueueConfigInfo(queue, stoppedParam);
+ updateInfo.getUpdateQueueInfo().add(stoppedInfo);
+ }
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ for (String queue : queuePaths) {
+ assertEquals(QueueState.STOPPED, newCSConf.getState(queue));
+ }
+ }
+
+ /**
+ * Updates root.a's max-AM-resource-percent to 0.2 via the REST API and
+ * verifies it takes effect, then sends the same key with a null value and
+ * verifies the setting reverts to the scheduler default — i.e. a null value
+ * in the update map means "remove this key".
+ */
+ @Test
+ public void testUpdateQueue() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Update config value.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ Map<String, String> updateParam = new HashMap<>();
+ updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
+ "0.2");
+ QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+ updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+
+ // Sanity check: root.a starts at the default before the mutation.
+ assertEquals(CapacitySchedulerConfiguration
+ .DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,
+ cs.getConfiguration()
+ .getMaximumApplicationMasterResourcePerQueuePercent("root.a"),
+ 0.001f);
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
+ assertEquals(0.2f, newCSConf
+ .getMaximumApplicationMasterResourcePerQueuePercent("root.a"), 0.001f);
+
+ // Remove config. Config value should be reverted to default.
+ updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
+ null);
+ aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+ updateInfo.getUpdateQueueInfo().clear();
+ updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ newCSConf = cs.getConfiguration();
+ assertEquals(CapacitySchedulerConfiguration
+ .DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT, newCSConf
+ .getMaximumApplicationMasterResourcePerQueuePercent("root.a"),
+ 0.001f);
+ }
+
+ /**
+ * Sets root.a and root.b capacities to 50 each in one REST request and
+ * verifies both new values are reflected in the scheduler configuration.
+ */
+ @Test
+ public void testUpdateQueueCapacity() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Update root.a and root.b capacity to 50.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ Map<String, String> updateParam = new HashMap<>();
+ updateParam.put(CapacitySchedulerConfiguration.CAPACITY, "50");
+ // Both queue entries share the same capacity=50 param map.
+ QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
+ QueueConfigInfo bUpdateInfo = new QueueConfigInfo("root.b", updateParam);
+ updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
+ updateInfo.getUpdateQueueInfo().add(bUpdateInfo);
+
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("queues").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.a"), 0.01f);
+ assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
+ }
+
+ /**
+ * Stops the RM (if started), deletes the test's scheduler config file, and
+ * restores the saved original by renaming OLD_CONF_FILE back to CONF_FILE
+ * (presumably saved aside during setup — the setup code is not visible
+ * here; confirm). Fails loudly if the restore rename does not succeed.
+ * NOTE(review): the exception message says "re-copy" but the operation is a
+ * rename — consider aligning the message with the behavior.
+ */
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ if (rm != null) {
+ rm.stop();
+ }
+ CONF_FILE.delete();
+ if (!OLD_CONF_FILE.renameTo(CONF_FILE)) {
+ throw new RuntimeException("Failed to re-copy old configuration file");
+ }
+ super.tearDown();
+ }
+
+ /**
+ * Marshals a JAXB-annotated object to its JSON string form using Jersey's
+ * JSONJAXBContext, for use as a REST request entity.
+ *
+ * @param nsli the JAXB-annotated object to serialize
+ * @param klass the JAXB class used to build the JSON context
+ * @return the JSON representation of {@code nsli}
+ */
+ @SuppressWarnings("rawtypes")
+ private String toJson(Object nsli, Class klass) throws Exception {
+ StringWriter sw = new StringWriter();
+ JSONJAXBContext ctx = new JSONJAXBContext(klass);
+ JSONMarshaller jm = ctx.createJSONMarshaller();
+ jm.marshallToJSON(nsli, sw);
+ return sw.toString();
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[03/20] hadoop git commit: HADOOP-14420. generateReports property is
not applicable for maven-site-plugin:attach-descriptor goal. Contributed by
Andras Bokor.
Posted by xg...@apache.org.
HADOOP-14420. generateReports property is not applicable for maven-site-plugin:attach-descriptor goal. Contributed by Andras Bokor.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7d85866
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7d85866
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7d85866
Branch: refs/heads/YARN-5734
Commit: a7d858668ab0e458867b659499fe6a4363284ee2
Parents: fbb7d6b
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 31 15:07:22 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 31 15:07:22 2017 -0700
----------------------------------------------------------------------
pom.xml | 3 ---
1 file changed, 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d85866/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 29524a4..d82cd9f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -389,9 +389,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<goals>
<goal>attach-descriptor</goal>
</goals>
- <configuration>
- <generateReports>true</generateReports>
- </configuration>
</execution>
</executions>
</plugin>
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[08/20] hadoop git commit: HADOOP-14397. Pull up the builder pattern
to FileSystem and add AbstractContractCreateTest for it. (Lei (Eddy) Xu)
Posted by xg...@apache.org.
HADOOP-14397. Pull up the builder pattern to FileSystem and add AbstractContractCreateTest for it. (Lei (Eddy) Xu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9586b0e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9586b0e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9586b0e2
Branch: refs/heads/YARN-5734
Commit: 9586b0e24fce29c278134658e68b8c47cd9d8c51
Parents: abbf412
Author: Lei Xu <le...@cloudera.com>
Authored: Mon Jul 31 20:04:57 2017 -0700
Committer: Lei Xu <le...@cloudera.com>
Committed: Mon Jul 31 20:12:40 2017 -0700
----------------------------------------------------------------------
.../hadoop/fs/FSDataOutputStreamBuilder.java | 4 +-
.../java/org/apache/hadoop/fs/FileSystem.java | 24 ++++--
.../apache/hadoop/fs/TestLocalFileSystem.java | 2 +-
.../fs/contract/AbstractContractAppendTest.java | 33 ++++++-
.../fs/contract/AbstractContractCreateTest.java | 90 ++++++++++++++------
.../hadoop/fs/contract/ContractTestUtils.java | 43 ++++++++--
.../hadoop/hdfs/DistributedFileSystem.java | 3 +-
7 files changed, 154 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 0527202..8608a7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -44,8 +44,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
*
* To create missing parent directory, use {@link #recursive()}.
*/
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public abstract class FSDataOutputStreamBuilder
<S extends FSDataOutputStream, B extends FSDataOutputStreamBuilder<S, B>> {
private final FileSystem fs;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index d7cd7dd..fc7b9b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4153,9 +4153,21 @@ public abstract class FileSystem extends Configured implements Closeable {
@Override
public FSDataOutputStream build() throws IOException {
- return getFS().create(getPath(), getPermission(), getFlags(),
- getBufferSize(), getReplication(), getBlockSize(), getProgress(),
- getChecksumOpt());
+ if (getFlags().contains(CreateFlag.CREATE) ||
+ getFlags().contains(CreateFlag.OVERWRITE)) {
+ if (isRecursive()) {
+ return getFS().create(getPath(), getPermission(), getFlags(),
+ getBufferSize(), getReplication(), getBlockSize(), getProgress(),
+ getChecksumOpt());
+ } else {
+ return getFS().createNonRecursive(getPath(), getPermission(),
+ getFlags(), getBufferSize(), getReplication(), getBlockSize(),
+ getProgress());
+ }
+ } else if (getFlags().contains(CreateFlag.APPEND)) {
+ return getFS().append(getPath(), getBufferSize(), getProgress());
+ }
+ throw new IOException("Must specify either create, overwrite or append");
}
@Override
@@ -4174,8 +4186,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* HADOOP-14384. Temporarily reduce the visibility of method before the
* builder interface becomes stable.
*/
- @InterfaceAudience.Private
- protected FSDataOutputStreamBuilder createFile(Path path) {
+ public FSDataOutputStreamBuilder createFile(Path path) {
return new FileSystemDataOutputStreamBuilder(this, path)
.create().overwrite(true);
}
@@ -4185,8 +4196,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param path file path.
* @return a {@link FSDataOutputStreamBuilder} to build file append request.
*/
- @InterfaceAudience.Private
- protected FSDataOutputStreamBuilder appendFile(Path path) {
+ public FSDataOutputStreamBuilder appendFile(Path path) {
return new FileSystemDataOutputStreamBuilder(this, path).append();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 527b9eb..00cedc3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -659,7 +659,7 @@ public class TestLocalFileSystem {
try {
FSDataOutputStreamBuilder builder =
- fileSys.createFile(path);
+ fileSys.createFile(path).recursive();
FSDataOutputStream out = builder.build();
String content = "Create with a generic type of createFile!";
byte[] contentOrigin = content.getBytes("UTF8");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index 6b3e98b..d61b635 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -61,6 +61,19 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
}
@Test
+ public void testBuilderAppendToEmptyFile() throws Throwable {
+ touch(getFileSystem(), target);
+ byte[] dataset = dataset(256, 'a', 'z');
+ try (FSDataOutputStream outputStream =
+ getFileSystem().appendFile(target).build()) {
+ outputStream.write(dataset);
+ }
+ byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
+ dataset.length);
+ ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
+ }
+
+ @Test
public void testAppendNonexistentFile() throws Throwable {
try {
FSDataOutputStream out = getFileSystem().append(target);
@@ -78,9 +91,9 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
byte[] original = dataset(8192, 'A', 'Z');
byte[] appended = dataset(8192, '0', '9');
createFile(getFileSystem(), target, false, original);
- FSDataOutputStream outputStream = getFileSystem().append(target);
- outputStream.write(appended);
- outputStream.close();
+ try (FSDataOutputStream out = getFileSystem().append(target)) {
+ out.write(appended);
+ }
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
original.length + appended.length);
ContractTestUtils.validateFileContent(bytes,
@@ -88,6 +101,20 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
}
@Test
+ public void testBuilderAppendToExistingFile() throws Throwable {
+ byte[] original = dataset(8192, 'A', 'Z');
+ byte[] appended = dataset(8192, '0', '9');
+ createFile(getFileSystem(), target, false, original);
+ try (FSDataOutputStream out = getFileSystem().appendFile(target).build()) {
+ out.write(appended);
+ }
+ byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
+ original.length + appended.length);
+ ContractTestUtils.validateFileContent(bytes,
+ new byte[][]{original, appended});
+ }
+
+ @Test
public void testAppendMissingTarget() throws Throwable {
try {
FSDataOutputStream out = getFileSystem().append(target);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index a9ce607..2053f50 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -47,24 +47,37 @@ public abstract class AbstractContractCreateTest extends
*/
public static final int CREATE_TIMEOUT = 15000;
- @Test
- public void testCreateNewFile() throws Throwable {
- describe("Foundational 'create a file' test");
- Path path = path("testCreateNewFile");
+ protected Path path(String filepath, boolean useBuilder) throws IOException {
+ return super.path(filepath + (useBuilder ? "" : "-builder"));
+ }
+
+ private void testCreateNewFile(boolean useBuilder) throws Throwable {
+ describe("Foundational 'create a file' test, using builder API=" +
+ useBuilder);
+ Path path = path("testCreateNewFile", useBuilder);
byte[] data = dataset(256, 'a', 'z');
- writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false);
+ writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false,
+ useBuilder);
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
}
@Test
- public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
- describe("Verify overwriting an existing file fails");
- Path path = path("testCreateFileOverExistingFileNoOverwrite");
+ public void testCreateNewFile() throws Throwable {
+ testCreateNewFile(true);
+ testCreateNewFile(false);
+ }
+
+ private void testCreateFileOverExistingFileNoOverwrite(boolean useBuilder)
+ throws Throwable {
+ describe("Verify overwriting an existing file fails, using builder API=" +
+ useBuilder);
+ Path path = path("testCreateFileOverExistingFileNoOverwrite", useBuilder);
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
try {
- writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
+ writeDataset(getFileSystem(), path, data2, data2.length, 1024, false,
+ useBuilder);
fail("writing without overwrite unexpectedly succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
@@ -76,6 +89,26 @@ public abstract class AbstractContractCreateTest extends
}
}
+ @Test
+ public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
+ testCreateFileOverExistingFileNoOverwrite(false);
+ testCreateFileOverExistingFileNoOverwrite(true);
+ }
+
+ private void testOverwriteExistingFile(boolean useBuilder) throws Throwable {
+ describe("Overwrite an existing file and verify the new data is there, " +
+ "use builder API=" + useBuilder);
+ Path path = path("testOverwriteExistingFile", useBuilder);
+ byte[] data = dataset(256, 'a', 'z');
+ writeDataset(getFileSystem(), path, data, data.length, 1024, false,
+ useBuilder);
+ ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
+ byte[] data2 = dataset(10 * 1024, 'A', 'Z');
+ writeDataset(getFileSystem(), path, data2, data2.length, 1024, true,
+ useBuilder);
+ ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
+ }
+
/**
* This test catches some eventual consistency problems that blobstores exhibit,
* as we are implicitly verifying that updates are consistent. This
@@ -84,25 +117,21 @@ public abstract class AbstractContractCreateTest extends
*/
@Test
public void testOverwriteExistingFile() throws Throwable {
- describe("Overwrite an existing file and verify the new data is there");
- Path path = path("testOverwriteExistingFile");
- byte[] data = dataset(256, 'a', 'z');
- writeDataset(getFileSystem(), path, data, data.length, 1024, false);
- ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
- byte[] data2 = dataset(10 * 1024, 'A', 'Z');
- writeDataset(getFileSystem(), path, data2, data2.length, 1024, true);
- ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
+ testOverwriteExistingFile(false);
+ testOverwriteExistingFile(true);
}
- @Test
- public void testOverwriteEmptyDirectory() throws Throwable {
- describe("verify trying to create a file over an empty dir fails");
+ private void testOverwriteEmptyDirectory(boolean useBuilder)
+ throws Throwable {
+ describe("verify trying to create a file over an empty dir fails, " +
+ "use builder API=" + useBuilder);
Path path = path("testOverwriteEmptyDirectory");
mkdirs(path);
assertIsDirectory(path);
byte[] data = dataset(256, 'a', 'z');
try {
- writeDataset(getFileSystem(), path, data, data.length, 1024, true);
+ writeDataset(getFileSystem(), path, data, data.length, 1024, true,
+ useBuilder);
assertIsDirectory(path);
fail("write of file over empty dir succeeded");
} catch (FileAlreadyExistsException expected) {
@@ -121,8 +150,15 @@ public abstract class AbstractContractCreateTest extends
}
@Test
- public void testOverwriteNonEmptyDirectory() throws Throwable {
- describe("verify trying to create a file over a non-empty dir fails");
+ public void testOverwriteEmptyDirectory() throws Throwable {
+ testOverwriteEmptyDirectory(false);
+ testOverwriteEmptyDirectory(true);
+ }
+
+ private void testOverwriteNonEmptyDirectory(boolean useBuilder)
+ throws Throwable {
+ describe("verify trying to create a file over a non-empty dir fails, " +
+ "use builder API=" + useBuilder);
Path path = path("testOverwriteNonEmptyDirectory");
mkdirs(path);
try {
@@ -140,7 +176,7 @@ public abstract class AbstractContractCreateTest extends
byte[] data = dataset(256, 'a', 'z');
try {
writeDataset(getFileSystem(), path, data, data.length, 1024,
- true);
+ true, useBuilder);
FileStatus status = getFileSystem().getFileStatus(path);
boolean isDir = status.isDirectory();
@@ -167,6 +203,12 @@ public abstract class AbstractContractCreateTest extends
}
@Test
+ public void testOverwriteNonEmptyDirectory() throws Throwable {
+ testOverwriteNonEmptyDirectory(false);
+ testOverwriteNonEmptyDirectory(true);
+ }
+
+ @Test
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
describe("verify that a newly created file exists as soon as open returns");
Path path = path("testCreatedFileIsImmediatelyVisible");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index e60fd43..c66dabf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -146,16 +146,45 @@ public class ContractTestUtils extends Assert {
int len,
int buffersize,
boolean overwrite) throws IOException {
+ writeDataset(fs, path, src, len, buffersize, overwrite, false);
+ }
+
+ /**
+ * Write a file.
+ * Optional flags control
+ * whether file overwrite operations should be enabled
+ * Optional using {@link org.apache.hadoop.fs.FSDataOutputStreamBuilder}
+ *
+ * @param fs filesystem
+ * @param path path to write to
+ * @param len length of data
+ * @param overwrite should the create option allow overwrites?
+ * @param useBuilder should use builder API to create file?
+ * @throws IOException IO problems
+ */
+ public static void writeDataset(FileSystem fs, Path path, byte[] src,
+ int len, int buffersize, boolean overwrite, boolean useBuilder)
+ throws IOException {
assertTrue(
"Not enough data in source array to write " + len + " bytes",
src.length >= len);
- FSDataOutputStream out = fs.create(path,
- overwrite,
- fs.getConf()
- .getInt(IO_FILE_BUFFER_SIZE_KEY,
- IO_FILE_BUFFER_SIZE_DEFAULT),
- (short) 1,
- buffersize);
+ FSDataOutputStream out;
+ if (useBuilder) {
+ out = fs.createFile(path)
+ .overwrite(overwrite)
+ .replication((short) 1)
+ .bufferSize(buffersize)
+ .blockSize(buffersize)
+ .build();
+ } else {
+ out = fs.create(path,
+ overwrite,
+ fs.getConf()
+ .getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT),
+ (short) 1,
+ buffersize);
+ }
out.write(src, 0, len);
out.close();
assertFileHasLength(fs, path, len);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 34c631a..13c5eb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2892,7 +2892,8 @@ public class DistributedFileSystem extends FileSystem {
*/
@Override
public FSDataOutputStream build() throws IOException {
- if (getFlags().contains(CreateFlag.CREATE)) {
+ if (getFlags().contains(CreateFlag.CREATE) ||
+ getFlags().contains(CreateFlag.OVERWRITE)) {
if (isRecursive()) {
return dfs.create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(),
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[16/20] hadoop git commit: YARN-5949. Add pluggable configuration ACL
policy interface and implementation. (Jonathan Hung via wangda)
Posted by xg...@apache.org.
YARN-5949. Add pluggable configuration ACL policy interface and implementation. (Jonathan Hung via wangda)
Change-Id: Ib98e82ff753bede21fcab2e6ca9ec1e7a5a2008f
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c1dcf5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c1dcf5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c1dcf5c
Branch: refs/heads/YARN-5734
Commit: 2c1dcf5cea698950bfa63943d78794ed59f59f92
Parents: 93c1747
Author: Wangda Tan <wa...@apache.org>
Authored: Mon May 22 13:38:31 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:40 2017 -0700
----------------------------------------------------------------------
.../hadoop/yarn/conf/YarnConfiguration.java | 3 +
.../src/main/resources/yarn-default.xml | 11 ++
.../ConfigurationMutationACLPolicy.java | 47 ++++++
.../ConfigurationMutationACLPolicyFactory.java | 49 ++++++
.../DefaultConfigurationMutationACLPolicy.java | 45 ++++++
.../scheduler/MutableConfScheduler.java | 19 ++-
.../scheduler/MutableConfigurationProvider.java | 8 +-
.../scheduler/capacity/CapacityScheduler.java | 6 +-
.../conf/MutableCSConfigurationProvider.java | 151 +++++++++++++++++-
...ueueAdminConfigurationMutationACLPolicy.java | 96 ++++++++++++
.../resourcemanager/webapp/RMWebServices.java | 131 +---------------
.../TestConfigurationMutationACLPolicies.java | 154 +++++++++++++++++++
.../TestMutableCSConfigurationProvider.java | 40 +++--
13 files changed, 610 insertions(+), 150 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ce413f6..01db626 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -627,6 +627,9 @@ public class YarnConfiguration extends Configuration {
public static final String DEFAULT_CONFIGURATION_STORE =
MEMORY_CONFIGURATION_STORE;
+ public static final String RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS =
+ YARN_PREFIX + "scheduler.configuration.mutation.acl-policy.class";
+
public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
+ "authorization-provider";
private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 74ff747..a0bed5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3148,4 +3148,15 @@
<value>memory</value>
</property>
+ <property>
+ <description>
+ The class to use for configuration mutation ACL policy if using a mutable
+ configuration provider. Controls whether a mutation request is allowed.
+ The DefaultConfigurationMutationACLPolicy checks if the requestor is a
+ YARN admin.
+ </description>
+ <name>yarn.scheduler.configuration.mutation.acl-policy.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.DefaultConfigurationMutationACLPolicy</value>
+ </property>
+
</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..724487b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+/**
+ * Interface for determining whether configuration mutations are allowed.
+ */
+public interface ConfigurationMutationACLPolicy {
+
+ /**
+ * Initialize ACL policy with configuration and RMContext.
+ * @param conf Configuration to initialize with.
+ * @param rmContext the RM context, used to reach scheduler/queue state.
+ */
+ void init(Configuration conf, RMContext rmContext); // called before any isMutationAllowed checks
+
+ /**
+ * Check if mutation is allowed.
+ * @param user User issuing the request
+ * @param confUpdate configurations to be updated
+ * @return whether provided mutation is allowed or not
+ */
+ boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+ confUpdate);
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
new file mode 100644
index 0000000..2898785
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicyFactory.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Factory class for creating instances of
+ * {@link ConfigurationMutationACLPolicy}.
+ */
+public final class ConfigurationMutationACLPolicyFactory {
+
+ private static final Log LOG = LogFactory.getLog(
+ ConfigurationMutationACLPolicyFactory.class);
+
+ private ConfigurationMutationACLPolicyFactory() {
+ // Unused.
+ }
+
+ public static ConfigurationMutationACLPolicy getPolicy(Configuration conf) {
+ Class<? extends ConfigurationMutationACLPolicy> policyClass =
+ conf.getClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ DefaultConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ LOG.info("Using ConfigurationMutationACLPolicy implementation - " +
+ policyClass);
+ return ReflectionUtils.newInstance(policyClass, conf);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..680c3b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+/**
+ * Default configuration mutation ACL policy. Checks if user is YARN admin.
+ */
+public class DefaultConfigurationMutationACLPolicy implements
+ ConfigurationMutationACLPolicy {
+
+ private YarnAuthorizationProvider authorizer;
+
+ @Override
+ public void init(Configuration conf, RMContext rmContext) {
+ authorizer = YarnAuthorizationProvider.getInstance(conf); // rmContext is intentionally unused here
+ }
+
+ @Override
+ public boolean isMutationAllowed(UserGroupInformation user,
+ QueueConfigsUpdateInfo confUpdate) {
+ return authorizer.isAdmin(user); // admin-only check; confUpdate contents are not consulted
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 35e36e1..93a935e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -17,10 +17,11 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
import java.io.IOException;
-import java.util.Map;
/**
* Interface for a scheduler that supports changing configuration at runtime.
@@ -31,10 +32,22 @@ public interface MutableConfScheduler extends ResourceScheduler {
/**
* Update the scheduler's configuration.
* @param user Caller of this update
- * @param confUpdate key-value map of the configuration update
+ * @param confUpdate configuration update
* @throws IOException if update is invalid
*/
void updateConfiguration(UserGroupInformation user,
- Map<String, String> confUpdate) throws IOException;
+ QueueConfigsUpdateInfo confUpdate) throws IOException;
+ /**
+ * Get the scheduler configuration.
+ * @return the scheduler configuration
+ */
+ Configuration getConfiguration();
+
+ /**
+ * Get queue object based on queue name.
+ * @param queueName the queue name
+ * @return the queue object
+ */
+ Queue getQueue(String queueName);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 889c3bc..f04c128 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
import java.io.IOException;
-import java.util.Map;
/**
* Interface for allowing changing scheduler configurations.
@@ -32,7 +34,7 @@ public interface MutableConfigurationProvider {
* @param confUpdate Key-value pairs for configurations to be updated.
* @throws IOException if scheduler could not be reinitialized
*/
- void mutateConfiguration(String user, Map<String, String> confUpdate)
- throws IOException;
+ void mutateConfiguration(UserGroupInformation user, QueueConfigsUpdateInfo
+ confUpdate) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index ac1748a..5bcb352 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -642,6 +643,7 @@ public class CapacityScheduler extends
preemptionManager.refreshQueues(null, this.getRootQueue());
}
+ @Override
public CSQueue getQueue(String queueName) {
if (queueName == null) {
return null;
@@ -2517,10 +2519,10 @@ public class CapacityScheduler extends
@Override
public void updateConfiguration(UserGroupInformation user,
- Map<String, String> confUpdate) throws IOException {
+ QueueConfigsUpdateInfo confUpdate) throws IOException {
if (csConfProvider instanceof MutableConfigurationProvider) {
((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
- user.getShortUserName(), confUpdate);
+ user, confUpdate);
} else {
throw new UnsupportedOperationException("Configured CS configuration " +
"provider does not support updating configuration.");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index ea1b3c0..8b879b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -18,14 +18,27 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+import com.google.common.base.Joiner;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicyFactory;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
import java.util.Map;
/**
@@ -38,6 +51,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
private Configuration schedConf;
private YarnConfigurationStore confStore;
+ private ConfigurationMutationACLPolicy aclMutationPolicy;
private RMContext rmContext;
private Configuration conf;
@@ -68,6 +82,9 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
schedConf.set(kv.getKey(), kv.getValue());
}
confStore.initialize(config, schedConf);
+ this.aclMutationPolicy = ConfigurationMutationACLPolicyFactory
+ .getPolicy(config);
+ aclMutationPolicy.init(config, rmContext);
this.conf = config;
}
@@ -80,12 +97,17 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
}
@Override
- public void mutateConfiguration(String user,
- Map<String, String> confUpdate) throws IOException {
+ public void mutateConfiguration(UserGroupInformation user,
+ QueueConfigsUpdateInfo confUpdate) throws IOException {
+ if (!aclMutationPolicy.isMutationAllowed(user, confUpdate)) {
+ throw new AccessControlException("User is not admin of all modified" +
+ " queues.");
+ }
Configuration oldConf = new Configuration(schedConf);
- LogMutation log = new LogMutation(confUpdate, user);
+ Map<String, String> kvUpdate = constructKeyValueConfUpdate(confUpdate);
+ LogMutation log = new LogMutation(kvUpdate, user.getShortUserName());
long id = confStore.logMutation(log);
- for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
+ for (Map.Entry<String, String> kv : kvUpdate.entrySet()) {
if (kv.getValue() == null) {
schedConf.unset(kv.getKey());
} else {
@@ -101,4 +123,125 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
}
confStore.confirmMutation(id, true);
}
+
+
+ private Map<String, String> constructKeyValueConfUpdate(
+ QueueConfigsUpdateInfo mutationInfo) throws IOException {
+ CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+ CapacitySchedulerConfiguration proposedConf =
+ new CapacitySchedulerConfiguration(cs.getConfiguration(), false);
+ Map<String, String> confUpdate = new HashMap<>();
+ for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
+ removeQueue(queueToRemove, proposedConf, confUpdate);
+ }
+ for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
+ addQueue(addQueueInfo, proposedConf, confUpdate);
+ }
+ for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
+ updateQueue(updateQueueInfo, proposedConf, confUpdate);
+ }
+ return confUpdate;
+ }
+
+ private void removeQueue(
+ String queueToRemove, CapacitySchedulerConfiguration proposedConf,
+ Map<String, String> confUpdate) throws IOException {
+ if (queueToRemove == null) {
+ return;
+ } else {
+ CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+ String queueName = queueToRemove.substring(
+ queueToRemove.lastIndexOf('.') + 1); // leaf name of the full queue path
+ CSQueue queue = cs.getQueue(queueName);
+ if (queue == null ||
+ !queue.getQueuePath().equals(queueToRemove)) {
+ throw new IOException("Queue " + queueToRemove + " not found");
+ } else if (queueToRemove.lastIndexOf('.') == -1) {
+ throw new IOException("Can't remove queue " + queueToRemove); // refuse to remove root
+ }
+ String parentQueuePath = queueToRemove.substring(0, queueToRemove
+ .lastIndexOf('.'));
+ String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
+ List<String> newSiblingQueues = new ArrayList<>();
+ for (String siblingQueue : siblingQueues) {
+ if (!siblingQueue.equals(queueName)) {
+ newSiblingQueues.add(siblingQueue);
+ }
+ }
+ proposedConf.setQueues(parentQueuePath, newSiblingQueues
+ .toArray(new String[0]));
+ String queuesConfig = CapacitySchedulerConfiguration.PREFIX
+ + parentQueuePath + CapacitySchedulerConfiguration.DOT
+ + CapacitySchedulerConfiguration.QUEUES;
+ if (newSiblingQueues.size() == 0) {
+ confUpdate.put(queuesConfig, null); // null value means "unset this key" downstream
+ } else {
+ confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
+ }
+ for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
+ ".*" + queueToRemove.replaceAll("\\.", "\\\\.") + "\\..*") // escape '.' so path dots match literally
+ .entrySet()) {
+ proposedConf.unset(confRemove.getKey());
+ confUpdate.put(confRemove.getKey(), null);
+ }
+ }
+ }
+
+ private void addQueue(
+ QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
+ Map<String, String> confUpdate) throws IOException {
+ if (addInfo == null) {
+ return;
+ } else {
+ CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
+ String queuePath = addInfo.getQueue();
+ String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
+ if (cs.getQueue(queueName) != null) {
+ throw new IOException("Can't add existing queue " + queuePath);
+ } else if (queuePath.lastIndexOf('.') == -1) {
+ throw new IOException("Can't add invalid queue " + queuePath);
+ }
+ String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
+ String[] siblings = proposedConf.getQueues(parentQueue);
+ List<String> siblingQueues = siblings == null ? new ArrayList<>() :
+ new ArrayList<>(Arrays.<String>asList(siblings));
+ siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
+ proposedConf.setQueues(parentQueue,
+ siblingQueues.toArray(new String[0]));
+ confUpdate.put(CapacitySchedulerConfiguration.PREFIX
+ + parentQueue + CapacitySchedulerConfiguration.DOT
+ + CapacitySchedulerConfiguration.QUEUES,
+ Joiner.on(',').join(siblingQueues));
+ String keyPrefix = CapacitySchedulerConfiguration.PREFIX
+ + queuePath + CapacitySchedulerConfiguration.DOT;
+ for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
+ if (kv.getValue() == null) {
+ proposedConf.unset(keyPrefix + kv.getKey());
+ } else {
+ proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ }
+ }
+
+ private void updateQueue(QueueConfigInfo updateInfo,
+ CapacitySchedulerConfiguration proposedConf,
+ Map<String, String> confUpdate) {
+ if (updateInfo == null) {
+ return;
+ } else {
+ String queuePath = updateInfo.getQueue();
+ String keyPrefix = CapacitySchedulerConfiguration.PREFIX
+ + queuePath + CapacitySchedulerConfiguration.DOT;
+ for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
+ if (kv.getValue() == null) {
+ proposedConf.unset(keyPrefix + kv.getKey());
+ } else {
+ proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
new file mode 100644
index 0000000..1f94c1c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * A configuration mutation ACL policy which checks that user has admin
+ * privileges on all queues they are changing.
+ */
+public class QueueAdminConfigurationMutationACLPolicy implements
+ ConfigurationMutationACLPolicy {
+
+ private RMContext rmContext;
+
+ @Override
+ public void init(Configuration conf, RMContext context) {
+ this.rmContext = context;
+ }
+
+ @Override
+ public boolean isMutationAllowed(UserGroupInformation user,
+ QueueConfigsUpdateInfo confUpdate) {
+ Set<String> queues = new HashSet<>(); // every queue path touched by this mutation
+ for (QueueConfigInfo addQueueInfo : confUpdate.getAddQueueInfo()) {
+ queues.add(addQueueInfo.getQueue());
+ }
+ for (String removeQueue : confUpdate.getRemoveQueueInfo()) {
+ queues.add(removeQueue);
+ }
+ for (QueueConfigInfo updateQueueInfo : confUpdate.getUpdateQueueInfo()) {
+ queues.add(updateQueueInfo.getQueue());
+ }
+ for (String queuePath : queues) {
+ String queueName = queuePath.lastIndexOf('.') != -1 ?
+ queuePath.substring(queuePath.lastIndexOf('.') + 1) : queuePath;
+ QueueInfo queueInfo = null;
+ try {
+ queueInfo = rmContext.getScheduler()
+ .getQueueInfo(queueName, false, false);
+ } catch (IOException e) {
+ // Queue is not found, do nothing.
+ }
+ String parentPath = queuePath;
+ // TODO: handle global config change.
+ while (queueInfo == null) {
+ // We are adding a queue (whose parent we are possibly also adding).
+ // Check ACL of lowest parent queue which already exists.
+ parentPath = parentPath.substring(0, parentPath.lastIndexOf('.')); // NOTE(review): throws StringIndexOutOfBoundsException if no existing ancestor is found (lastIndexOf == -1) — confirm root always resolves
+ String parentName = parentPath.lastIndexOf('.') != -1 ?
+ parentPath.substring(parentPath.lastIndexOf('.') + 1) : parentPath;
+ try {
+ queueInfo = rmContext.getScheduler()
+ .getQueueInfo(parentName, false, false);
+ } catch (IOException e) {
+ // Queue is not found, do nothing.
+ }
+ }
+ Queue queue = ((MutableConfScheduler) rmContext.getScheduler())
+ .getQueue(queueInfo.getQueueName());
+ if (queue != null && !queue.hasAccess(QueueACL.ADMINISTER_QUEUE, user)) {
+ return false; // deny if the user lacks ADMINISTER_QUEUE on any touched queue
+ }
+ }
+ return true; // unresolvable queue objects are skipped (treated as allowed)
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 56a0bf8..d670748 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -135,7 +135,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
@@ -2434,10 +2433,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException, YarnException {
- Map<String, String> confUpdate =
- constructKeyValueConfUpdate(mutationInfo);
- ((CapacityScheduler) scheduler).updateConfiguration(callerUGI,
- confUpdate);
+ ((MutableConfScheduler) scheduler).updateConfiguration(callerUGI,
+ mutationInfo);
return null;
}
});
@@ -2449,129 +2446,9 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
"successfully applied.").build();
} else {
return Response.status(Status.BAD_REQUEST)
- .entity("Configuration change only supported by CapacityScheduler.")
+ .entity("Configuration change only supported by " +
+ "MutableConfScheduler.")
.build();
}
}
-
- private Map<String, String> constructKeyValueConfUpdate(
- QueueConfigsUpdateInfo mutationInfo) throws IOException {
- CapacitySchedulerConfiguration currentConf =
- ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
- CapacitySchedulerConfiguration proposedConf =
- new CapacitySchedulerConfiguration(currentConf, false);
- Map<String, String> confUpdate = new HashMap<>();
- for (String queueToRemove : mutationInfo.getRemoveQueueInfo()) {
- removeQueue(queueToRemove, proposedConf, confUpdate);
- }
- for (QueueConfigInfo addQueueInfo : mutationInfo.getAddQueueInfo()) {
- addQueue(addQueueInfo, proposedConf, confUpdate);
- }
- for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
- updateQueue(updateQueueInfo, proposedConf, confUpdate);
- }
- return confUpdate;
- }
-
- private void removeQueue(
- String queueToRemove, CapacitySchedulerConfiguration proposedConf,
- Map<String, String> confUpdate) throws IOException {
- if (queueToRemove == null) {
- return;
- } else {
- CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
- String queueName = queueToRemove.substring(
- queueToRemove.lastIndexOf('.') + 1);
- CSQueue queue = cs.getQueue(queueName);
- if (queue == null ||
- !queue.getQueuePath().equals(queueToRemove)) {
- throw new IOException("Queue " + queueToRemove + " not found");
- } else if (queueToRemove.lastIndexOf('.') == -1) {
- throw new IOException("Can't remove queue " + queueToRemove);
- }
- String parentQueuePath = queueToRemove.substring(0, queueToRemove
- .lastIndexOf('.'));
- String[] siblingQueues = proposedConf.getQueues(parentQueuePath);
- List<String> newSiblingQueues = new ArrayList<>();
- for (String siblingQueue : siblingQueues) {
- if (!siblingQueue.equals(queueName)) {
- newSiblingQueues.add(siblingQueue);
- }
- }
- proposedConf.setQueues(parentQueuePath, newSiblingQueues
- .toArray(new String[0]));
- String queuesConfig = CapacitySchedulerConfiguration.PREFIX +
- parentQueuePath + CapacitySchedulerConfiguration.DOT +
- CapacitySchedulerConfiguration.QUEUES;
- if (newSiblingQueues.size() == 0) {
- confUpdate.put(queuesConfig, null);
- } else {
- confUpdate.put(queuesConfig, Joiner.on(',').join(newSiblingQueues));
- }
- for (Map.Entry<String, String> confRemove : proposedConf.getValByRegex(
- ".*" + queueToRemove.replaceAll("\\.", "\\.") + "\\..*")
- .entrySet()) {
- proposedConf.unset(confRemove.getKey());
- confUpdate.put(confRemove.getKey(), null);
- }
- }
- }
-
- private void addQueue(
- QueueConfigInfo addInfo, CapacitySchedulerConfiguration proposedConf,
- Map<String, String> confUpdate) throws IOException {
- if (addInfo == null) {
- return;
- } else {
- CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
- String queuePath = addInfo.getQueue();
- String queueName = queuePath.substring(queuePath.lastIndexOf('.') + 1);
- if (cs.getQueue(queueName) != null) {
- throw new IOException("Can't add existing queue " + queuePath);
- } else if (queuePath.lastIndexOf('.') == -1) {
- throw new IOException("Can't add invalid queue " + queuePath);
- }
- String parentQueue = queuePath.substring(0, queuePath.lastIndexOf('.'));
- String[] siblings = proposedConf.getQueues(parentQueue);
- List<String> siblingQueues = siblings == null ? new ArrayList<>() :
- new ArrayList<>(Arrays.<String>asList(siblings));
- siblingQueues.add(queuePath.substring(queuePath.lastIndexOf('.') + 1));
- proposedConf.setQueues(parentQueue,
- siblingQueues.toArray(new String[0]));
- confUpdate.put(CapacitySchedulerConfiguration.PREFIX +
- parentQueue + CapacitySchedulerConfiguration.DOT +
- CapacitySchedulerConfiguration.QUEUES,
- Joiner.on(',').join(siblingQueues));
- String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
- queuePath + CapacitySchedulerConfiguration.DOT;
- for (Map.Entry<String, String> kv : addInfo.getParams().entrySet()) {
- if (kv.getValue() == null) {
- proposedConf.unset(keyPrefix + kv.getKey());
- } else {
- proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
- }
- confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
- }
- }
- }
-
- private void updateQueue(QueueConfigInfo updateInfo,
- CapacitySchedulerConfiguration proposedConf,
- Map<String, String> confUpdate) {
- if (updateInfo == null) {
- return;
- } else {
- String queuePath = updateInfo.getQueue();
- String keyPrefix = CapacitySchedulerConfiguration.PREFIX +
- queuePath + CapacitySchedulerConfiguration.DOT;
- for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
- if (kv.getValue() == null) {
- proposedConf.unset(keyPrefix + kv.getKey());
- } else {
- proposedConf.set(keyPrefix + kv.getKey(), kv.getValue());
- }
- confUpdate.put(keyPrefix + kv.getKey(), kv.getValue());
- }
- }
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
new file mode 100644
index 0000000..4016dcf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestConfigurationMutationACLPolicies {
+
+ private ConfigurationMutationACLPolicy policy;
+ private RMContext rmContext;
+ private MutableConfScheduler scheduler;
+
+ private static final UserGroupInformation GOOD_USER = UserGroupInformation
+ .createUserForTesting("goodUser", new String[] {});
+ private static final UserGroupInformation BAD_USER = UserGroupInformation
+ .createUserForTesting("badUser", new String[] {});
+ private static final Map<String, String> EMPTY_MAP =
+ Collections.<String, String>emptyMap();
+
+ @Before
+ public void setUp() throws IOException {
+ rmContext = mock(RMContext.class);
+ scheduler = mock(MutableConfScheduler.class);
+ when(rmContext.getScheduler()).thenReturn(scheduler);
+ mockQueue("a", scheduler);
+ mockQueue("b", scheduler);
+ mockQueue("b1", scheduler);
+ }
+
+ private void mockQueue(String queueName, MutableConfScheduler scheduler)
+ throws IOException {
+ QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null,
+ null, null, null, null, false);
+ when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
+ .thenReturn(queueInfo);
+ Queue queue = mock(Queue.class);
+ when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(GOOD_USER)))
+ .thenReturn(true);
+ when(queue.hasAccess(eq(QueueACL.ADMINISTER_QUEUE), eq(BAD_USER)))
+ .thenReturn(false);
+ when(scheduler.getQueue(eq(queueName))).thenReturn(queue);
+ }
+ @Test
+ public void testDefaultPolicy() {
+ Configuration conf = new Configuration();
+ conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ DefaultConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+ policy.init(conf, rmContext);
+ assertTrue(policy.isMutationAllowed(GOOD_USER, null));
+ assertFalse(policy.isMutationAllowed(BAD_USER, null));
+ }
+
+ @Test
+ public void testQueueAdminBasedPolicy() {
+ Configuration conf = new Configuration();
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ QueueAdminConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+ policy.init(conf, rmContext);
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ QueueConfigInfo configInfo = new QueueConfigInfo("root.a", EMPTY_MAP);
+ updateInfo.getUpdateQueueInfo().add(configInfo);
+ assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+ assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+ }
+
+ @Test
+ public void testQueueAdminPolicyAddQueue() {
+ Configuration conf = new Configuration();
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ QueueAdminConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+ policy.init(conf, rmContext);
+ // Add root.b.b2. Should check ACL of root.b queue.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2", EMPTY_MAP);
+ updateInfo.getAddQueueInfo().add(configInfo);
+ assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+ assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+ }
+
+ @Test
+ public void testQueueAdminPolicyAddNestedQueue() {
+ Configuration conf = new Configuration();
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ QueueAdminConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+ policy.init(conf, rmContext);
+ // Add root.b.b2.b21. Should check ACL of root.b queue.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2.b21", EMPTY_MAP);
+ updateInfo.getAddQueueInfo().add(configInfo);
+ assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+ assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+ }
+
+ @Test
+ public void testQueueAdminPolicyRemoveQueue() {
+ Configuration conf = new Configuration();
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ QueueAdminConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+ policy.init(conf, rmContext);
+ // Remove root.b.b1.
+ QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ updateInfo.getRemoveQueueInfo().add("root.b.b1");
+ assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+ assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c1dcf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 254da31..13229b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -19,8 +19,12 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
import org.junit.Before;
import org.junit.Test;
@@ -43,22 +47,34 @@ public class TestMutableCSConfigurationProvider {
private MutableCSConfigurationProvider confProvider;
private RMContext rmContext;
- private Map<String, String> goodUpdate;
- private Map<String, String> badUpdate;
+ private QueueConfigsUpdateInfo goodUpdate;
+ private QueueConfigsUpdateInfo badUpdate;
private CapacityScheduler cs;
- private static final String TEST_USER = "testUser";
+ private static final UserGroupInformation TEST_USER = UserGroupInformation
+ .createUserForTesting("testUser", new String[] {});
@Before
public void setUp() {
cs = mock(CapacityScheduler.class);
rmContext = mock(RMContext.class);
when(rmContext.getScheduler()).thenReturn(cs);
+ when(cs.getConfiguration()).thenReturn(
+ new CapacitySchedulerConfiguration());
confProvider = new MutableCSConfigurationProvider(rmContext);
- goodUpdate = new HashMap<>();
- goodUpdate.put("goodKey", "goodVal");
- badUpdate = new HashMap<>();
- badUpdate.put("badKey", "badVal");
+ goodUpdate = new QueueConfigsUpdateInfo();
+ Map<String, String> goodUpdateMap = new HashMap<>();
+ goodUpdateMap.put("goodKey", "goodVal");
+ QueueConfigInfo goodUpdateInfo = new
+ QueueConfigInfo("root.a", goodUpdateMap);
+ goodUpdate.getUpdateQueueInfo().add(goodUpdateInfo);
+
+ badUpdate = new QueueConfigsUpdateInfo();
+ Map<String, String> badUpdateMap = new HashMap<>();
+ badUpdateMap.put("badKey", "badVal");
+ QueueConfigInfo badUpdateInfo = new
+ QueueConfigInfo("root.a", badUpdateMap);
+ badUpdate.getUpdateQueueInfo().add(badUpdateInfo);
}
@Test
@@ -66,15 +82,16 @@ public class TestMutableCSConfigurationProvider {
Configuration conf = new Configuration();
confProvider.init(conf);
assertNull(confProvider.loadConfiguration(conf)
- .get("goodKey"));
+ .get("yarn.scheduler.capacity.root.a.goodKey"));
doNothing().when(cs).reinitialize(any(Configuration.class),
any(RMContext.class));
confProvider.mutateConfiguration(TEST_USER, goodUpdate);
assertEquals("goodVal", confProvider.loadConfiguration(conf)
- .get("goodKey"));
+ .get("yarn.scheduler.capacity.root.a.goodKey"));
- assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+ assertNull(confProvider.loadConfiguration(conf).get(
+ "yarn.scheduler.capacity.root.a.badKey"));
doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
any(RMContext.class));
try {
@@ -82,6 +99,7 @@ public class TestMutableCSConfigurationProvider {
} catch (IOException e) {
// Expected exception.
}
- assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+ assertNull(confProvider.loadConfiguration(conf).get(
+ "yarn.scheduler.capacity.root.a.badKey"));
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[19/20] hadoop git commit: YARN-5953 addendum: Move QueueConfigInfo
and SchedConfUpdateInfo to package org.apache.hadoop.yarn.webapp.dao
Posted by xg...@apache.org.
YARN-5953 addendum: Move QueueConfigInfo and SchedConfUpdateInfo to package org.apache.hadoop.yarn.webapp.dao
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79701d92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79701d92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79701d92
Branch: refs/heads/YARN-5734
Commit: 79701d924a00dbfd1a78667596dd54f525de457f
Parents: f15309e
Author: Xuan <xg...@apache.org>
Authored: Mon Jul 31 11:49:05 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:43 2017 -0700
----------------------------------------------------------------------
.../hadoop/yarn/webapp/dao/QueueConfigInfo.java | 57 +++++++++++++
.../yarn/webapp/dao/SchedConfUpdateInfo.java | 85 ++++++++++++++++++++
.../webapp/dao/QueueConfigInfo.java | 57 -------------
.../webapp/dao/SchedConfUpdateInfo.java | 85 --------------------
4 files changed, 142 insertions(+), 142 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79701d92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
new file mode 100644
index 0000000..d1d91c2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/QueueConfigInfo.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp.dao;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for adding or updating a queue to scheduler configuration
+ * for this queue.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class QueueConfigInfo {
+
+ @XmlElement(name = "queueName")
+ private String queue;
+
+ private HashMap<String, String> params = new HashMap<>();
+
+ public QueueConfigInfo() { }
+
+ public QueueConfigInfo(String queue, Map<String, String> params) {
+ this.queue = queue;
+ this.params = new HashMap<>(params);
+ }
+
+ public String getQueue() {
+ return this.queue;
+ }
+
+ public HashMap<String, String> getParams() {
+ return this.params;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79701d92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
new file mode 100644
index 0000000..bb84096
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/SchedConfUpdateInfo.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp.dao;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for making scheduler configuration changes (supports adding,
+ * removing, or updating a queue, as well as global scheduler conf changes).
+ */
+@XmlRootElement(name = "schedConf")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class SchedConfUpdateInfo {
+
+ @XmlElement(name = "add-queue")
+ private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
+
+ @XmlElement(name = "remove-queue")
+ private ArrayList<String> removeQueueInfo = new ArrayList<>();
+
+ @XmlElement(name = "update-queue")
+ private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
+
+ private HashMap<String, String> global = new HashMap<>();
+
+ public SchedConfUpdateInfo() {
+ // JAXB needs this
+ }
+
+ public ArrayList<QueueConfigInfo> getAddQueueInfo() {
+ return addQueueInfo;
+ }
+
+ public void setAddQueueInfo(ArrayList<QueueConfigInfo> addQueueInfo) {
+ this.addQueueInfo = addQueueInfo;
+ }
+
+ public ArrayList<String> getRemoveQueueInfo() {
+ return removeQueueInfo;
+ }
+
+ public void setRemoveQueueInfo(ArrayList<String> removeQueueInfo) {
+ this.removeQueueInfo = removeQueueInfo;
+ }
+
+ public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
+ return updateQueueInfo;
+ }
+
+ public void setUpdateQueueInfo(ArrayList<QueueConfigInfo> updateQueueInfo) {
+ this.updateQueueInfo = updateQueueInfo;
+ }
+
+ @XmlElementWrapper(name = "global-updates")
+ public HashMap<String, String> getGlobalParams() {
+ return global;
+ }
+
+ public void setGlobalParams(HashMap<String, String> globalInfo) {
+ this.global = globalInfo;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79701d92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
deleted file mode 100644
index d1d91c2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.webapp.dao;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Information for adding or updating a queue to scheduler configuration
- * for this queue.
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class QueueConfigInfo {
-
- @XmlElement(name = "queueName")
- private String queue;
-
- private HashMap<String, String> params = new HashMap<>();
-
- public QueueConfigInfo() { }
-
- public QueueConfigInfo(String queue, Map<String, String> params) {
- this.queue = queue;
- this.params = new HashMap<>(params);
- }
-
- public String getQueue() {
- return this.queue;
- }
-
- public HashMap<String, String> getParams() {
- return this.params;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79701d92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
deleted file mode 100644
index bb84096..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.webapp.dao;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlElementWrapper;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Information for making scheduler configuration changes (supports adding,
- * removing, or updating a queue, as well as global scheduler conf changes).
- */
-@XmlRootElement(name = "schedConf")
-@XmlAccessorType(XmlAccessType.FIELD)
-public class SchedConfUpdateInfo {
-
- @XmlElement(name = "add-queue")
- private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
-
- @XmlElement(name = "remove-queue")
- private ArrayList<String> removeQueueInfo = new ArrayList<>();
-
- @XmlElement(name = "update-queue")
- private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
-
- private HashMap<String, String> global = new HashMap<>();
-
- public SchedConfUpdateInfo() {
- // JAXB needs this
- }
-
- public ArrayList<QueueConfigInfo> getAddQueueInfo() {
- return addQueueInfo;
- }
-
- public void setAddQueueInfo(ArrayList<QueueConfigInfo> addQueueInfo) {
- this.addQueueInfo = addQueueInfo;
- }
-
- public ArrayList<String> getRemoveQueueInfo() {
- return removeQueueInfo;
- }
-
- public void setRemoveQueueInfo(ArrayList<String> removeQueueInfo) {
- this.removeQueueInfo = removeQueueInfo;
- }
-
- public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
- return updateQueueInfo;
- }
-
- public void setUpdateQueueInfo(ArrayList<QueueConfigInfo> updateQueueInfo) {
- this.updateQueueInfo = updateQueueInfo;
- }
-
- @XmlElementWrapper(name = "global-updates")
- public HashMap<String, String> getGlobalParams() {
- return global;
- }
-
- public void setGlobalParams(HashMap<String, String> globalInfo) {
- this.global = globalInfo;
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[18/20] hadoop git commit: YARN-5953:Create CLI for changing YARN
configurations. (Jonathan Hung via xgong)
Posted by xg...@apache.org.
YARN-5953:Create CLI for changing YARN configurations. (Jonathan Hung via xgong)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f15309e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f15309e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f15309e4
Branch: refs/heads/YARN-5734
Commit: f15309e42e72045db79fbb9f6a91353f317ea02d
Parents: 087477c
Author: Xuan <xg...@apache.org>
Authored: Fri Jul 7 14:16:46 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:42 2017 -0700
----------------------------------------------------------------------
hadoop-yarn-project/hadoop-yarn/bin/yarn | 4 +
hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 5 +
.../hadoop/yarn/client/cli/SchedConfCLI.java | 238 +++++++++++++++++++
.../yarn/client/cli/TestSchedConfCLI.java | 160 +++++++++++++
.../hadoop/yarn/webapp/dao/package-info.java | 27 +++
.../yarn/webapp/util/YarnWebServiceUtils.java | 14 ++
.../ConfigurationMutationACLPolicy.java | 2 +-
.../DefaultConfigurationMutationACLPolicy.java | 2 +-
.../scheduler/MutableConfScheduler.java | 2 +-
.../scheduler/MutableConfigurationProvider.java | 2 +-
.../scheduler/capacity/CapacityScheduler.java | 2 +-
.../conf/MutableCSConfigurationProvider.java | 4 +-
...ueueAdminConfigurationMutationACLPolicy.java | 4 +-
.../resourcemanager/webapp/RMWebServices.java | 1 +
.../webapp/dao/QueueConfigInfo.java | 4 +-
.../webapp/dao/SchedConfUpdateInfo.java | 18 +-
.../TestConfigurationMutationACLPolicies.java | 4 +-
.../TestMutableCSConfigurationProvider.java | 4 +-
.../TestRMWebServicesConfigurationMutation.java | 65 +++--
19 files changed, 508 insertions(+), 54 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index cf6457b..21656fe 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -46,6 +46,7 @@ function hadoop_usage
hadoop_add_subcommand "queue" "prints queue information"
hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
hadoop_add_subcommand "rmadmin" "admin tools"
+ hadoop_add_subcommand "schedconf" "modify scheduler configuration"
hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager daemon"
hadoop_add_subcommand "timelinereader" "run the timeline reader server"
@@ -137,6 +138,9 @@ function yarncmd_case
rmadmin)
HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
;;
+ schedconf)
+ HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.SchedConfCLI'
+ ;;
scmadmin)
HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
;;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index ca879f5..8b72394 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -285,6 +285,11 @@ goto :eof
set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
goto :eof
+:schedconf
+ set CLASS=org.apache.hadoop.yarn.client.cli.SchedConfCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
:make_command_arguments
if "%1" == "--config" (
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
new file mode 100644
index 0000000..e17062e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -0,0 +1,238 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client.cli;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response.Status;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * CLI for modifying scheduler configuration.
+ */
+@Public
+@Evolving
+public class SchedConfCLI extends Configured implements Tool {
+
+ private static final String ADD_QUEUES_OPTION = "addQueues";
+ private static final String REMOVE_QUEUES_OPTION = "removeQueues";
+ private static final String UPDATE_QUEUES_OPTION = "updateQueues";
+ private static final String GLOBAL_OPTIONS = "globalUpdates";
+ private static final String HELP_CMD = "help";
+
+ private static final String CONF_ERR_MSG = "Specify configuration key " +
+ "value as confKey=confVal.";
+
+ public SchedConfCLI() {
+ super(new YarnConfiguration());
+ }
+
+ public static void main(String[] args) throws Exception {
+ SchedConfCLI cli = new SchedConfCLI();
+ int exitCode = cli.run(args);
+ System.exit(exitCode);
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ Options opts = new Options();
+
+ opts.addOption("add", ADD_QUEUES_OPTION, true,
+ "Add queues with configurations");
+ opts.addOption("remove", REMOVE_QUEUES_OPTION, true,
+ "Remove queues");
+ opts.addOption("update", UPDATE_QUEUES_OPTION, true,
+ "Update queue configurations");
+ opts.addOption("global", GLOBAL_OPTIONS, true,
+ "Update global scheduler configurations");
+ opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
+
+ int exitCode = -1;
+ CommandLine parsedCli = null;
+ try {
+ parsedCli = new GnuParser().parse(opts, args);
+ } catch (MissingArgumentException ex) {
+ System.err.println("Missing argument for options");
+ printUsage();
+ return exitCode;
+ }
+
+ if (parsedCli.hasOption(HELP_CMD)) {
+ printUsage();
+ return 0;
+ }
+
+ boolean hasOption = false;
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
+ try {
+ if (parsedCli.hasOption(ADD_QUEUES_OPTION)) {
+ hasOption = true;
+ addQueues(parsedCli.getOptionValue(ADD_QUEUES_OPTION), updateInfo);
+ }
+ if (parsedCli.hasOption(REMOVE_QUEUES_OPTION)) {
+ hasOption = true;
+ removeQueues(parsedCli.getOptionValue(REMOVE_QUEUES_OPTION),
+ updateInfo);
+ }
+ if (parsedCli.hasOption(UPDATE_QUEUES_OPTION)) {
+ hasOption = true;
+ updateQueues(parsedCli.getOptionValue(UPDATE_QUEUES_OPTION),
+ updateInfo);
+ }
+ if (parsedCli.hasOption(GLOBAL_OPTIONS)) {
+ hasOption = true;
+ globalUpdates(parsedCli.getOptionValue(GLOBAL_OPTIONS), updateInfo);
+ }
+ } catch (IllegalArgumentException e) {
+ System.err.println(e.getMessage());
+ return -1;
+ }
+
+ if (!hasOption) {
+ System.err.println("Invalid Command Usage: ");
+ printUsage();
+ return -1;
+ }
+
+ Client webServiceClient = Client.create();
+ WebResource webResource = webServiceClient.resource(WebAppUtils.
+ getRMWebAppURLWithScheme(getConf()));
+ ClientResponse response = webResource.path("ws").path("v1").path("cluster")
+ .path("sched-conf").accept(MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ if (response != null) {
+ if (response.getStatus() == Status.OK.getStatusCode()) {
+ System.out.println("Configuration changed successfully.");
+ return 0;
+ } else {
+ System.err.println("Configuration change unsuccessful: "
+ + response.getEntity(String.class));
+ }
+ } else {
+ System.err.println("Configuration change unsuccessful: null response");
+ }
+ return -1;
+ }
+
+ @VisibleForTesting
+ void addQueues(String args, SchedConfUpdateInfo updateInfo) {
+ if (args == null) {
+ return;
+ }
+ ArrayList<QueueConfigInfo> queueConfigInfos = new ArrayList<>();
+ for (String arg : args.split(";")) {
+ queueConfigInfos.add(getQueueConfigInfo(arg));
+ }
+ updateInfo.setAddQueueInfo(queueConfigInfos);
+ }
+
+ @VisibleForTesting
+ void removeQueues(String args, SchedConfUpdateInfo updateInfo) {
+ if (args == null) {
+ return;
+ }
+ List<String> queuesToRemove = Arrays.asList(args.split(","));
+ updateInfo.setRemoveQueueInfo(new ArrayList<>(queuesToRemove));
+ }
+
+ @VisibleForTesting
+ void updateQueues(String args, SchedConfUpdateInfo updateInfo) {
+ if (args == null) {
+ return;
+ }
+ ArrayList<QueueConfigInfo> queueConfigInfos = new ArrayList<>();
+ for (String arg : args.split(";")) {
+ queueConfigInfos.add(getQueueConfigInfo(arg));
+ }
+ updateInfo.setUpdateQueueInfo(queueConfigInfos);
+ }
+
+ @VisibleForTesting
+ void globalUpdates(String args, SchedConfUpdateInfo updateInfo) {
+ if (args == null) {
+ return;
+ }
+ HashMap<String, String> globalUpdates = new HashMap<>();
+ for (String globalUpdate : args.split(",")) {
+ putKeyValuePair(globalUpdates, globalUpdate);
+ }
+ updateInfo.setGlobalParams(globalUpdates);
+ }
+
+ private QueueConfigInfo getQueueConfigInfo(String arg) {
+ String[] queueArgs = arg.split(",");
+ String queuePath = queueArgs[0];
+ Map<String, String> queueConfigs = new HashMap<>();
+ for (int i = 1; i < queueArgs.length; ++i) {
+ putKeyValuePair(queueConfigs, queueArgs[i]);
+ }
+ return new QueueConfigInfo(queuePath, queueConfigs);
+ }
+
+ private void putKeyValuePair(Map<String, String> kv, String args) {
+ String[] argParts = args.split("=");
+ if (argParts.length == 1) {
+ if (argParts[0].isEmpty() || !args.contains("=")) {
+ throw new IllegalArgumentException(CONF_ERR_MSG);
+ } else {
+ // key specified, but no value e.g. "confKey="
+ kv.put(argParts[0], null);
+ }
+ } else if (argParts.length > 2) {
+ throw new IllegalArgumentException(CONF_ERR_MSG);
+ } else {
+ if (argParts[0].isEmpty()) {
+ throw new IllegalArgumentException(CONF_ERR_MSG);
+ }
+ kv.put(argParts[0], argParts[1]);
+ }
+ }
+
+ private void printUsage() {
+ System.out.println("yarn schedconf [-add queueAddPath1,confKey1=confVal1,"
+ + "confKey2=confVal2;queueAddPath2,confKey3=confVal3] "
+ + "[-remove queueRemovePath1,queueRemovePath2] "
+ + "[-update queueUpdatePath1,confKey1=confVal1] "
+ + "[-global globalConfKey1=globalConfVal1,"
+ + "globalConfKey2=globalConfVal2]");
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
new file mode 100644
index 0000000..d2f0639
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
@@ -0,0 +1,160 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client.cli;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Class for testing {@link SchedConfCLI}.
+ */
+public class TestSchedConfCLI {
+
+ private ByteArrayOutputStream sysOutStream;
+ private PrintStream sysOut;
+
+ private ByteArrayOutputStream sysErrStream;
+ private PrintStream sysErr;
+
+ private SchedConfCLI cli;
+
+ @Before
+ public void setUp() {
+ sysOutStream = new ByteArrayOutputStream();
+ sysOut = new PrintStream(sysOutStream);
+ System.setOut(sysOut);
+
+ sysErrStream = new ByteArrayOutputStream();
+ sysErr = new PrintStream(sysErrStream);
+ System.setErr(sysErr);
+
+ cli = new SchedConfCLI();
+ }
+
+ @Test(timeout = 10000)
+ public void testInvalidConf() throws Exception {
+ // conf pair with no key should be invalid
+ int exitCode = cli.run(new String[] {"-add", "root.a,=confVal"});
+ assertTrue("Should return an error code", exitCode != 0);
+ assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+ "value as confKey=confVal."));
+ exitCode = cli.run(new String[] {"-update", "root.a,=confVal"});
+ assertTrue("Should return an error code", exitCode != 0);
+ assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+ "value as confKey=confVal."));
+
+ exitCode = cli.run(new String[] {"-add", "root.a,confKey=confVal=conf"});
+ assertTrue("Should return an error code", exitCode != 0);
+ assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+ "value as confKey=confVal."));
+ exitCode = cli.run(new String[] {"-update", "root.a,confKey=confVal=c"});
+ assertTrue("Should return an error code", exitCode != 0);
+ assertTrue(sysErrStream.toString().contains("Specify configuration key " +
+ "value as confKey=confVal."));
+ }
+
+ @Test(timeout = 10000)
+ public void testAddQueues() {
+ SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+ cli.addQueues("root.a,a1=aVal1,a2=aVal2," +
+ "a3=", schedUpdateInfo);
+ QueueConfigInfo addInfo = schedUpdateInfo.getAddQueueInfo().get(0);
+ assertEquals("root.a", addInfo.getQueue());
+ Map<String, String> params = addInfo.getParams();
+ assertEquals(3, params.size());
+ assertEquals("aVal1", params.get("a1"));
+ assertEquals("aVal2", params.get("a2"));
+ assertNull(params.get("a3"));
+
+ schedUpdateInfo = new SchedConfUpdateInfo();
+ cli.addQueues("root.b,b1=bVal1;root.c,c1=cVal1", schedUpdateInfo);
+ assertEquals(2, schedUpdateInfo.getAddQueueInfo().size());
+ QueueConfigInfo bAddInfo = schedUpdateInfo.getAddQueueInfo().get(0);
+ assertEquals("root.b", bAddInfo.getQueue());
+ Map<String, String> bParams = bAddInfo.getParams();
+ assertEquals(1, bParams.size());
+ assertEquals("bVal1", bParams.get("b1"));
+ QueueConfigInfo cAddInfo = schedUpdateInfo.getAddQueueInfo().get(1);
+ assertEquals("root.c", cAddInfo.getQueue());
+ Map<String, String> cParams = cAddInfo.getParams();
+ assertEquals(1, cParams.size());
+ assertEquals("cVal1", cParams.get("c1"));
+ }
+
+ @Test(timeout = 10000)
+ public void testRemoveQueues() {
+ SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+ cli.removeQueues("root.a,root.b,root.c.c1", schedUpdateInfo);
+ List<String> removeInfo = schedUpdateInfo.getRemoveQueueInfo();
+ assertEquals(3, removeInfo.size());
+ assertEquals("root.a", removeInfo.get(0));
+ assertEquals("root.b", removeInfo.get(1));
+ assertEquals("root.c.c1", removeInfo.get(2));
+ }
+
+ @Test(timeout = 10000)
+ public void testUpdateQueues() {
+ SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+ cli.updateQueues("root.a,a1=aVal1,a2=aVal2," +
+ "a3=", schedUpdateInfo);
+ QueueConfigInfo updateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0);
+ assertEquals("root.a", updateInfo.getQueue());
+ Map<String, String> params = updateInfo.getParams();
+ assertEquals(3, params.size());
+ assertEquals("aVal1", params.get("a1"));
+ assertEquals("aVal2", params.get("a2"));
+ assertNull(params.get("a3"));
+
+ schedUpdateInfo = new SchedConfUpdateInfo();
+ cli.updateQueues("root.b,b1=bVal1;root.c,c1=cVal1", schedUpdateInfo);
+ assertEquals(2, schedUpdateInfo.getUpdateQueueInfo().size());
+ QueueConfigInfo bUpdateInfo = schedUpdateInfo.getUpdateQueueInfo().get(0);
+ assertEquals("root.b", bUpdateInfo.getQueue());
+ Map<String, String> bParams = bUpdateInfo.getParams();
+ assertEquals(1, bParams.size());
+ assertEquals("bVal1", bParams.get("b1"));
+ QueueConfigInfo cUpdateInfo = schedUpdateInfo.getUpdateQueueInfo().get(1);
+ assertEquals("root.c", cUpdateInfo.getQueue());
+ Map<String, String> cParams = cUpdateInfo.getParams();
+ assertEquals(1, cParams.size());
+ assertEquals("cVal1", cParams.get("c1"));
+ }
+
+ @Test(timeout = 10000)
+ public void testGlobalUpdate() {
+ SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+ cli.globalUpdates("schedKey1=schedVal1,schedKey2=schedVal2",
+ schedUpdateInfo);
+ Map<String, String> globalInfo = schedUpdateInfo.getGlobalParams();
+ assertEquals(2, globalInfo.size());
+ assertEquals("schedVal1", globalInfo.get("schedKey1"));
+ assertEquals("schedVal2", globalInfo.get("schedKey2"));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java
new file mode 100644
index 0000000..aec6762
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/dao/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Data structures for scheduler configuration mutation info.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.webapp.dao;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
index 4167e21..1cf1e97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
@@ -23,9 +23,14 @@ import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import javax.ws.rs.core.MediaType;
+
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
import org.apache.hadoop.conf.Configuration;
import org.codehaus.jettison.json.JSONObject;
+import java.io.StringWriter;
+
/**
* This class contains several utility function which could be used to generate
* Restful calls to RM/NM/AHS.
@@ -59,4 +64,13 @@ public final class YarnWebServiceUtils {
.get(ClientResponse.class);
return response.getEntity(JSONObject.class);
}
+
+ @SuppressWarnings("rawtypes")
+ public static String toJson(Object nsli, Class klass) throws Exception {
+ StringWriter sw = new StringWriter();
+ JSONJAXBContext ctx = new JSONJAXBContext(klass);
+ JSONMarshaller jm = ctx.createJSONMarshaller();
+ jm.marshallToJSON(nsli, sw);
+ return sw.toString();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
index 3a388fe..5bc5874 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
/**
* Interface for determining whether configuration mutations are allowed.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
index 6648668..1de6f6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
/**
* Default configuration mutation ACL policy. Checks if user is YARN admin.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 027d944..007dc29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 6b8306c..86be7c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 6f637a9..8a54013 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -137,11 +137,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index eb97260..670c0f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
import java.util.ArrayList;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
index 0a82d50..ee53fd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
import java.util.HashSet;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index ae1ebad..798b93f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -192,6 +192,7 @@ import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.ForbiddenException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.Inject;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
index b20eda6..d1d91c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigInfo.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+package org.apache.hadoop.yarn.webapp.dao;
import java.util.HashMap;
import java.util.Map;
@@ -54,4 +54,4 @@ public class QueueConfigInfo {
return this.params;
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
index b7c585e..bb84096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+package org.apache.hadoop.yarn.webapp.dao;
import java.util.ArrayList;
import java.util.HashMap;
@@ -54,16 +54,32 @@ public class SchedConfUpdateInfo {
return addQueueInfo;
}
+ public void setAddQueueInfo(ArrayList<QueueConfigInfo> addQueueInfo) {
+ this.addQueueInfo = addQueueInfo;
+ }
+
public ArrayList<String> getRemoveQueueInfo() {
return removeQueueInfo;
}
+ public void setRemoveQueueInfo(ArrayList<String> removeQueueInfo) {
+ this.removeQueueInfo = removeQueueInfo;
+ }
+
public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
return updateQueueInfo;
}
+ public void setUpdateQueueInfo(ArrayList<QueueConfigInfo> updateQueueInfo) {
+ this.updateQueueInfo = updateQueueInfo;
+ }
+
@XmlElementWrapper(name = "global-updates")
public HashMap<String, String> getGlobalParams() {
return global;
}
+
+ public void setGlobalParams(HashMap<String, String> globalInfo) {
+ this.global = globalInfo;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 0f5a3d8..398e909 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -25,8 +25,8 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import org.junit.Before;
import org.junit.Test;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 3216781..9104f16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -23,8 +23,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import org.junit.Before;
import org.junit.Test;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f15309e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 5fbe36f..26ef1b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -22,8 +22,6 @@ import com.google.inject.Guice;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.json.JSONJAXBContext;
-import com.sun.jersey.api.json.JSONMarshaller;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.WebAppDescriptor;
import org.apache.hadoop.conf.Configuration;
@@ -35,11 +33,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -50,7 +49,6 @@ import javax.ws.rs.core.Response.Status;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
-import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;
@@ -183,8 +181,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -218,8 +216,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -244,8 +242,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -269,8 +267,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -300,8 +298,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -332,8 +330,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -360,8 +358,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
CapacitySchedulerConfiguration newCSConf =
@@ -395,8 +393,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
@@ -413,8 +411,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
newCSConf = cs.getConfiguration();
@@ -443,8 +441,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
CapacitySchedulerConfiguration newCSConf =
@@ -468,8 +466,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
CapacitySchedulerConfiguration newCSConf =
@@ -483,8 +481,8 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
r.path("ws").path("v1").path("cluster")
.path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
- MediaType.APPLICATION_JSON)
+ .entity(YarnWebServiceUtils.toJson(updateInfo,
+ SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
newCSConf =
@@ -506,13 +504,4 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
}
super.tearDown();
}
-
- @SuppressWarnings("rawtypes")
- private String toJson(Object nsli, Class klass) throws Exception {
- StringWriter sw = new StringWriter();
- JSONJAXBContext ctx = new JSONJAXBContext(klass);
- JSONMarshaller jm = ctx.createJSONMarshaller();
- jm.marshallToJSON(nsli, sw);
- return sw.toString();
- }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[14/20] hadoop git commit: YARN-5948. Implement
MutableConfigurationManager for handling storage into configuration store
Posted by xg...@apache.org.
YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc19c35f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc19c35f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc19c35f
Branch: refs/heads/YARN-5734
Commit: fc19c35f6faa320cbee3ef5220fe5592fca6ff9e
Parents: 6023666
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:37 2017 -0700
----------------------------------------------------------------------
.../hadoop/yarn/conf/YarnConfiguration.java | 6 ++
.../src/main/resources/yarn-default.xml | 12 +++
.../scheduler/MutableConfigurationProvider.java | 35 ++++++++
.../scheduler/capacity/CapacityScheduler.java | 14 ++-
.../CapacitySchedulerConfiguration.java | 3 +
.../capacity/conf/CSConfigurationProvider.java | 3 +-
.../conf/MutableCSConfigurationProvider.java | 94 ++++++++++++++++++++
.../conf/YarnConfigurationStoreFactory.java | 46 ++++++++++
.../TestMutableCSConfigurationProvider.java | 83 +++++++++++++++++
9 files changed, 291 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 93437e3..ce413f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -621,6 +621,12 @@ public class YarnConfiguration extends Configuration {
public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
"org.apache.hadoop.yarn.LocalConfigurationProvider";
+ public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+ YARN_PREFIX + "scheduler.configuration.store.class";
+ public static final String MEMORY_CONFIGURATION_STORE = "memory";
+ public static final String DEFAULT_CONFIGURATION_STORE =
+ MEMORY_CONFIGURATION_STORE;
+
public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
+ "authorization-provider";
private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 7ddcfcd..74ff747 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3136,4 +3136,16 @@
<value>user-group</value>
</property>
+ <property>
+ <description>
+ The type of configuration store to use for storing scheduler
+ configurations, if using a mutable configuration provider.
+ Keywords such as "memory" map to certain configuration store
+ implementations. If keyword is not found, try to load this
+ value as a class.
+ </description>
+ <name>yarn.scheduler.configuration.store.class</name>
+ <value>memory</value>
+ </property>
+
</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 0000000..da30a2b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.util.Map;
+
+/**
+ * Interface for allowing changing scheduler configurations.
+ */
+public interface MutableConfigurationProvider {
+
+ /**
+ * Update the scheduler configuration with the provided key value pairs.
+ * @param user User issuing the request
+ * @param confUpdate Key-value pairs for configurations to be updated.
+ */
+ void mutateConfiguration(String user, Map<String, String> confUpdate);
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index a6feb09..ca6e872 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.MutableCSConfigurationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -291,10 +292,15 @@ public class CapacityScheduler extends
String confProviderStr = configuration.get(
CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
- if (confProviderStr.equals(
- CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
- this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
- } else {
+ switch (confProviderStr) {
+ case CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER:
+ this.csConfProvider =
+ new FileBasedCSConfigurationProvider(rmContext);
+ break;
+ case CapacitySchedulerConfiguration.STORE_CS_CONF_PROVIDER:
+ this.csConfProvider = new MutableCSConfigurationProvider(rmContext);
+ break;
+ default:
throw new IOException("Invalid CS configuration provider: " +
confProviderStr);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index ac1a1d9..f7f7ac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -309,6 +309,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
public static final String FILE_CS_CONF_PROVIDER = "file";
@Private
+ public static final String STORE_CS_CONF_PROVIDER = "store";
+
+ @Private
public static final String DEFAULT_CS_CONF_PROVIDER = FILE_CS_CONF_PROVIDER;
AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
index c9984ac..0d2c8bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/CSConfigurationProvider.java
@@ -32,8 +32,9 @@ public interface CSConfigurationProvider {
/**
* Initialize the configuration provider with given conf.
* @param conf configuration to initialize with
+ * @throws IOException if initialization fails due to misconfiguration
*/
- void init(Configuration conf);
+ void init(Configuration conf) throws IOException;
/**
* Loads capacity scheduler configuration object.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
new file mode 100644
index 0000000..267ab6a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * CS configuration provider which implements
+ * {@link MutableConfigurationProvider} for modifying capacity scheduler
+ * configuration.
+ */
+public class MutableCSConfigurationProvider implements CSConfigurationProvider,
+ MutableConfigurationProvider {
+
+ // Live scheduler configuration; mutated in place by mutateConfiguration().
+ private Configuration schedConf;
+ // Backing store that logs and acknowledges configuration mutations.
+ private YarnConfigurationStore confStore;
+ // RM context used to reach the scheduler for reinitialization.
+ private RMContext rmContext;
+ // RM configuration captured in init(); reused when reinitializing scheduler.
+ private Configuration conf;
+
+ public MutableCSConfigurationProvider(RMContext rmContext) {
+ this.rmContext = rmContext;
+ }
+
+ // Chooses the configuration store implementation (in-memory by default,
+ // otherwise whatever SCHEDULER_CONFIGURATION_STORE_CLASS names), loads the
+ // initial scheduler configuration from capacity-scheduler.xml, and
+ // initializes the store with it.
+ @Override
+ public void init(Configuration config) throws IOException {
+ String store = config.get(
+ YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+ YarnConfiguration.DEFAULT_CONFIGURATION_STORE);
+ switch (store) {
+ case YarnConfiguration.MEMORY_CONFIGURATION_STORE:
+ this.confStore = new InMemoryConfigurationStore();
+ break;
+ default:
+ this.confStore = YarnConfigurationStoreFactory.getStore(config);
+ break;
+ }
+ // Seed the scheduler configuration from capacity-scheduler.xml only
+ // (no core defaults), matching how CapacityScheduler reads its config.
+ Configuration initialSchedConf = new Configuration(false);
+ initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
+ this.schedConf = initialSchedConf;
+ confStore.initialize(config, initialSchedConf);
+ this.conf = config;
+ }
+
+ // Builds a CapacitySchedulerConfiguration by layering the mutable scheduler
+ // configuration on top of the supplied configuration.
+ @Override
+ public CapacitySchedulerConfiguration loadConfiguration(Configuration
+ configuration) throws IOException {
+ Configuration loadedConf = new Configuration(configuration);
+ loadedConf.addResource(schedConf);
+ return new CapacitySchedulerConfiguration(loadedConf, false);
+ }
+
+ // Applies the given key/value updates on behalf of "user": the mutation is
+ // logged to the store first, then applied and the scheduler reinitialized.
+ // If reinitialization fails, the previous configuration is restored and the
+ // mutation is confirmed as unsuccessful.
+ // NOTE(review): a failed update is swallowed here — the caller gets no
+ // signal that the mutation was rolled back; consider propagating the
+ // IOException once the interface permits it.
+ @Override
+ public void mutateConfiguration(String user,
+ Map<String, String> confUpdate) {
+ Configuration oldConf = new Configuration(schedConf);
+ LogMutation log = new LogMutation(confUpdate, user);
+ long id = confStore.logMutation(log);
+ for (Map.Entry<String, String> kv : confUpdate.entrySet()) {
+ schedConf.set(kv.getKey(), kv.getValue());
+ }
+ try {
+ rmContext.getScheduler().reinitialize(conf, rmContext);
+ } catch (IOException e) {
+ // Roll back to the pre-mutation configuration and record the failure.
+ schedConf = oldConf;
+ confStore.confirmMutation(id, false);
+ return;
+ }
+ confStore.confirmMutation(id, true);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
new file mode 100644
index 0000000..60249c8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStoreFactory.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Factory class for creating instances of {@link YarnConfigurationStore}.
+ */
+public final class YarnConfigurationStoreFactory {
+
+ private static final Log LOG = LogFactory.getLog(
+ YarnConfigurationStoreFactory.class);
+
+ private YarnConfigurationStoreFactory() {
+ // Utility class — not meant to be instantiated.
+ }
+
+ // Instantiates the YarnConfigurationStore implementation named by
+ // SCHEDULER_CONFIGURATION_STORE_CLASS, defaulting to the in-memory store.
+ // The returned instance is configured with the supplied conf via
+ // ReflectionUtils.
+ public static YarnConfigurationStore getStore(Configuration conf) {
+ Class<? extends YarnConfigurationStore> storeClass =
+ conf.getClass(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+ InMemoryConfigurationStore.class, YarnConfigurationStore.class);
+ LOG.info("Using YarnConfigurationStore implementation - " + storeClass);
+ return ReflectionUtils.newInstance(storeClass, conf);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc19c35f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
new file mode 100644
index 0000000..3f103b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests {@link MutableCSConfigurationProvider}.
+ */
+public class TestMutableCSConfigurationProvider {
+
+ private MutableCSConfigurationProvider confProvider;
+ private RMContext rmContext;
+ // An update expected to succeed (scheduler reinitialize is stubbed no-op).
+ private Map<String, String> goodUpdate;
+ // An update expected to be rolled back (reinitialize stubbed to throw).
+ private Map<String, String> badUpdate;
+ private CapacityScheduler cs;
+
+ private static final String TEST_USER = "testUser";
+
+ @Before
+ public void setUp() {
+ // Mock the scheduler and RM context so the provider can be exercised
+ // without a running ResourceManager.
+ cs = mock(CapacityScheduler.class);
+ rmContext = mock(RMContext.class);
+ when(rmContext.getScheduler()).thenReturn(cs);
+ confProvider = new MutableCSConfigurationProvider(rmContext);
+ goodUpdate = new HashMap<>();
+ goodUpdate.put("goodKey", "goodVal");
+ badUpdate = new HashMap<>();
+ badUpdate.put("badKey", "badVal");
+ }
+
+ // Verifies that with the in-memory store a successful mutation becomes
+ // visible in the loaded configuration, and a mutation whose scheduler
+ // reinitialization throws is rolled back and never becomes visible.
+ @Test
+ public void testInMemoryBackedProvider() throws IOException {
+ Configuration conf = new Configuration();
+ confProvider.init(conf);
+ assertNull(confProvider.loadConfiguration(conf)
+ .get("goodKey"));
+
+ doNothing().when(cs).reinitialize(any(Configuration.class),
+ any(RMContext.class));
+ confProvider.mutateConfiguration(TEST_USER, goodUpdate);
+ assertEquals("goodVal", confProvider.loadConfiguration(conf)
+ .get("goodKey"));
+
+ assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+ // Simulate a failed reinitialize: the bad update must be rolled back.
+ doThrow(new IOException()).when(cs).reinitialize(any(Configuration.class),
+ any(RMContext.class));
+ confProvider.mutateConfiguration(TEST_USER, badUpdate);
+ assertNull(confProvider.loadConfiguration(conf).get("badKey"));
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[17/20] hadoop git commit: YARN-6575. Support global configuration
mutation in MutableConfProvider. (Jonathan Hung via Xuan Gong)
Posted by xg...@apache.org.
YARN-6575. Support global configuration mutation in MutableConfProvider. (Jonathan Hung via Xuan Gong)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/087477c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/087477c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/087477c2
Branch: refs/heads/YARN-5734
Commit: 087477c2d366c5393caec904dc1c4c8ae2548c5d
Parents: 2c1dcf5
Author: Xuan <xg...@apache.org>
Authored: Mon Jun 5 16:30:38 2017 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue Aug 1 08:46:41 2017 -0700
----------------------------------------------------------------------
.../ConfigurationMutationACLPolicy.java | 4 +-
.../DefaultConfigurationMutationACLPolicy.java | 4 +-
.../scheduler/MutableConfScheduler.java | 4 +-
.../scheduler/MutableConfigurationProvider.java | 4 +-
.../scheduler/capacity/CapacityScheduler.java | 4 +-
.../conf/MutableCSConfigurationProvider.java | 10 +-
...ueueAdminConfigurationMutationACLPolicy.java | 22 +++-
.../resourcemanager/webapp/RMWebServices.java | 4 +-
.../webapp/dao/QueueConfigsUpdateInfo.java | 60 -----------
.../webapp/dao/SchedConfUpdateInfo.java | 69 +++++++++++++
.../TestConfigurationMutationACLPolicies.java | 28 ++++-
.../TestMutableCSConfigurationProvider.java | 10 +-
.../TestRMWebServicesConfigurationMutation.java | 101 +++++++++++++------
13 files changed, 205 insertions(+), 119 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
index 724487b..3a388fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ConfigurationMutationACLPolicy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
/**
* Interface for determining whether configuration mutations are allowed.
@@ -41,7 +41,7 @@ public interface ConfigurationMutationACLPolicy {
* @param confUpdate configurations to be updated
* @return whether provided mutation is allowed or not
*/
- boolean isMutationAllowed(UserGroupInformation user, QueueConfigsUpdateInfo
+ boolean isMutationAllowed(UserGroupInformation user, SchedConfUpdateInfo
confUpdate);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
index 680c3b8..6648668 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/DefaultConfigurationMutationACLPolicy.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
/**
* Default configuration mutation ACL policy. Checks if user is YARN admin.
@@ -39,7 +39,7 @@ public class DefaultConfigurationMutationACLPolicy implements
@Override
public boolean isMutationAllowed(UserGroupInformation user,
- QueueConfigsUpdateInfo confUpdate) {
+ SchedConfUpdateInfo confUpdate) {
return authorizer.isAdmin(user);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
index 93a935e..027d944 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfScheduler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
@@ -36,7 +36,7 @@ public interface MutableConfScheduler extends ResourceScheduler {
* @throws IOException if update is invalid
*/
void updateConfiguration(UserGroupInformation user,
- QueueConfigsUpdateInfo confUpdate) throws IOException;
+ SchedConfUpdateInfo confUpdate) throws IOException;
/**
* Get the scheduler configuration.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index f04c128..6b8306c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
@@ -34,7 +34,7 @@ public interface MutableConfigurationProvider {
* @param confUpdate Key-value pairs for configurations to be updated.
* @throws IOException if scheduler could not be reinitialized
*/
- void mutateConfiguration(UserGroupInformation user, QueueConfigsUpdateInfo
+ void mutateConfiguration(UserGroupInformation user, SchedConfUpdateInfo
confUpdate) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 5bcb352..6f637a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -137,7 +137,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import org.apache.hadoop.yarn.server.utils.Lock;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -2519,7 +2519,7 @@ public class CapacityScheduler extends
@Override
public void updateConfiguration(UserGroupInformation user,
- QueueConfigsUpdateInfo confUpdate) throws IOException {
+ SchedConfUpdateInfo confUpdate) throws IOException {
if (csConfProvider instanceof MutableConfigurationProvider) {
((MutableConfigurationProvider) csConfProvider).mutateConfiguration(
user, confUpdate);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 8b879b0..eb97260 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
import java.util.ArrayList;
@@ -98,7 +98,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
@Override
public void mutateConfiguration(UserGroupInformation user,
- QueueConfigsUpdateInfo confUpdate) throws IOException {
+ SchedConfUpdateInfo confUpdate) throws IOException {
if (!aclMutationPolicy.isMutationAllowed(user, confUpdate)) {
throw new AccessControlException("User is not admin of all modified" +
" queues.");
@@ -126,7 +126,7 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
private Map<String, String> constructKeyValueConfUpdate(
- QueueConfigsUpdateInfo mutationInfo) throws IOException {
+ SchedConfUpdateInfo mutationInfo) throws IOException {
CapacityScheduler cs = (CapacityScheduler) rmContext.getScheduler();
CapacitySchedulerConfiguration proposedConf =
new CapacitySchedulerConfiguration(cs.getConfiguration(), false);
@@ -140,6 +140,10 @@ public class MutableCSConfigurationProvider implements CSConfigurationProvider,
for (QueueConfigInfo updateQueueInfo : mutationInfo.getUpdateQueueInfo()) {
updateQueue(updateQueueInfo, proposedConf, confUpdate);
}
+ for (Map.Entry<String, String> global : mutationInfo.getGlobalParams()
+ .entrySet()) {
+ confUpdate.put(global.getKey(), global.getValue());
+ }
return confUpdate;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
index 1f94c1c..0a82d50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/QueueAdminConfigurationMutationACLPolicy.java
@@ -22,15 +22,17 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ConfigurationMutationACLPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import java.io.IOException;
import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
/**
@@ -40,16 +42,29 @@ import java.util.Set;
public class QueueAdminConfigurationMutationACLPolicy implements
ConfigurationMutationACLPolicy {
+ private Configuration conf;
private RMContext rmContext;
+ private YarnAuthorizationProvider authorizer;
@Override
- public void init(Configuration conf, RMContext context) {
+ public void init(Configuration config, RMContext context) {
+ this.conf = config;
this.rmContext = context;
+ this.authorizer = YarnAuthorizationProvider.getInstance(conf);
}
@Override
public boolean isMutationAllowed(UserGroupInformation user,
- QueueConfigsUpdateInfo confUpdate) {
+ SchedConfUpdateInfo confUpdate) {
+ // If there are global config changes, check if user is admin.
+ Map<String, String> globalParams = confUpdate.getGlobalParams();
+ if (globalParams != null && globalParams.size() != 0) {
+ if (!authorizer.isAdmin(user)) {
+ return false;
+ }
+ }
+
+ // Check if user is admin of all modified queues.
Set<String> queues = new HashSet<>();
for (QueueConfigInfo addQueueInfo : confUpdate.getAddQueueInfo()) {
queues.add(addQueueInfo.getQueue());
@@ -71,7 +86,6 @@ public class QueueAdminConfigurationMutationACLPolicy implements
// Queue is not found, do nothing.
}
String parentPath = queuePath;
- // TODO: handle global config change.
while (queueInfo == null) {
// We are adding a queue (whose parent we are possibly also adding).
// Check ACL of lowest parent queue which already exists.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index d670748..ae1ebad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -2409,11 +2409,11 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
}
@PUT
- @Path("/queues")
+ @Path("/sched-conf")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
- public Response updateSchedulerConfiguration(QueueConfigsUpdateInfo
+ public Response updateSchedulerConfiguration(SchedConfUpdateInfo
mutationInfo, @Context HttpServletRequest hsr)
throws AuthorizationException, InterruptedException {
init();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
deleted file mode 100644
index 644ec90..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/QueueConfigsUpdateInfo.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
-
-import java.util.ArrayList;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-/**
- * Information for making scheduler configuration changes (supports adding,
- * removing, or updating a queue).
- */
-@XmlRootElement(name = "schedConf")
-@XmlAccessorType(XmlAccessType.FIELD)
-public class QueueConfigsUpdateInfo {
-
- @XmlElement(name = "add")
- private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
-
- @XmlElement(name = "remove")
- private ArrayList<String> removeQueueInfo = new ArrayList<>();
-
- @XmlElement(name = "update")
- private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
-
- public QueueConfigsUpdateInfo() {
- // JAXB needs this
- }
-
- public ArrayList<QueueConfigInfo> getAddQueueInfo() {
- return addQueueInfo;
- }
-
- public ArrayList<String> getRemoveQueueInfo() {
- return removeQueueInfo;
- }
-
- public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
- return updateQueueInfo;
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
new file mode 100644
index 0000000..b7c585e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Information for making scheduler configuration changes (supports adding,
+ * removing, or updating a queue, as well as global scheduler conf changes).
+ */
+@XmlRootElement(name = "schedConf")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class SchedConfUpdateInfo {
+
+ @XmlElement(name = "add-queue")
+ private ArrayList<QueueConfigInfo> addQueueInfo = new ArrayList<>();
+
+ @XmlElement(name = "remove-queue")
+ private ArrayList<String> removeQueueInfo = new ArrayList<>();
+
+ @XmlElement(name = "update-queue")
+ private ArrayList<QueueConfigInfo> updateQueueInfo = new ArrayList<>();
+
+ private HashMap<String, String> global = new HashMap<>();
+
+ public SchedConfUpdateInfo() {
+ // JAXB needs this
+ }
+
+ public ArrayList<QueueConfigInfo> getAddQueueInfo() {
+ return addQueueInfo;
+ }
+
+ public ArrayList<String> getRemoveQueueInfo() {
+ return removeQueueInfo;
+ }
+
+ public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
+ return updateQueueInfo;
+ }
+
+ @XmlElementWrapper(name = "global-updates")
+ public HashMap<String, String> getGlobalParams() {
+ return global;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 4016dcf..0f5a3d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import org.junit.Before;
import org.junit.Test;
@@ -77,6 +77,7 @@ public class TestConfigurationMutationACLPolicies {
.thenReturn(false);
when(scheduler.getQueue(eq(queueName))).thenReturn(queue);
}
+
@Test
public void testDefaultPolicy() {
Configuration conf = new Configuration();
@@ -98,7 +99,7 @@ public class TestConfigurationMutationACLPolicies {
ConfigurationMutationACLPolicy.class);
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
QueueConfigInfo configInfo = new QueueConfigInfo("root.a", EMPTY_MAP);
updateInfo.getUpdateQueueInfo().add(configInfo);
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
@@ -114,7 +115,7 @@ public class TestConfigurationMutationACLPolicies {
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
// Add root.b.b1. Should check ACL of root.b queue.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2", EMPTY_MAP);
updateInfo.getAddQueueInfo().add(configInfo);
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
@@ -130,7 +131,7 @@ public class TestConfigurationMutationACLPolicies {
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
// Add root.b.b1.b11. Should check ACL of root.b queue.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
QueueConfigInfo configInfo = new QueueConfigInfo("root.b.b2.b21", EMPTY_MAP);
updateInfo.getAddQueueInfo().add(configInfo);
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
@@ -146,9 +147,26 @@ public class TestConfigurationMutationACLPolicies {
policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
policy.init(conf, rmContext);
// Remove root.b.b1.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.getRemoveQueueInfo().add("root.b.b1");
assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
}
+
+ @Test
+ public void testQueueAdminPolicyGlobal() {
+ Configuration conf = new Configuration();
+ conf.set(YarnConfiguration.YARN_ADMIN_ACL, GOOD_USER.getShortUserName());
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS,
+ QueueAdminConfigurationMutationACLPolicy.class,
+ ConfigurationMutationACLPolicy.class);
+ policy = ConfigurationMutationACLPolicyFactory.getPolicy(conf);
+ policy.init(conf, rmContext);
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
+ assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+ assertTrue(policy.isMutationAllowed(BAD_USER, updateInfo));
+ updateInfo.getGlobalParams().put("globalKey", "globalValue");
+ assertTrue(policy.isMutationAllowed(GOOD_USER, updateInfo));
+ assertFalse(policy.isMutationAllowed(BAD_USER, updateInfo));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 13229b1..3216781 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import org.junit.Before;
import org.junit.Test;
@@ -47,8 +47,8 @@ public class TestMutableCSConfigurationProvider {
private MutableCSConfigurationProvider confProvider;
private RMContext rmContext;
- private QueueConfigsUpdateInfo goodUpdate;
- private QueueConfigsUpdateInfo badUpdate;
+ private SchedConfUpdateInfo goodUpdate;
+ private SchedConfUpdateInfo badUpdate;
private CapacityScheduler cs;
private static final UserGroupInformation TEST_USER = UserGroupInformation
@@ -62,14 +62,14 @@ public class TestMutableCSConfigurationProvider {
when(cs.getConfiguration()).thenReturn(
new CapacitySchedulerConfiguration());
confProvider = new MutableCSConfigurationProvider(rmContext);
- goodUpdate = new QueueConfigsUpdateInfo();
+ goodUpdate = new SchedConfUpdateInfo();
Map<String, String> goodUpdateMap = new HashMap<>();
goodUpdateMap.put("goodKey", "goodVal");
QueueConfigInfo goodUpdateInfo = new
QueueConfigInfo("root.a", goodUpdateMap);
goodUpdate.getUpdateQueueInfo().add(goodUpdateInfo);
- badUpdate = new QueueConfigsUpdateInfo();
+ badUpdate = new SchedConfUpdateInfo();
Map<String, String> badUpdateMap = new HashMap<>();
badUpdateMap.put("badKey", "badVal");
QueueConfigInfo badUpdateInfo = new
http://git-wip-us.apache.org/repos/asf/hadoop/blob/087477c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index d149055..5fbe36f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigsUpdateInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -162,7 +162,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
ClientResponse response;
// Add parent queue root.d with two children d1 and d2.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
Map<String, String> d1Capacity = new HashMap<>();
d1Capacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
d1Capacity.put(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY, "25");
@@ -181,9 +181,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
updateInfo.getAddQueueInfo().add(d);
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
@@ -205,7 +205,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
ClientResponse response;
// Add root.d with capacity 25, reducing root.b capacity from 75 to 50.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
Map<String, String> dCapacity = new HashMap<>();
dCapacity.put(CapacitySchedulerConfiguration.CAPACITY, "25");
Map<String, String> bCapacity = new HashMap<>();
@@ -216,9 +216,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
updateInfo.getUpdateQueueInfo().add(b);
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
@@ -238,13 +238,13 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
stopQueue("root.a.a2");
// Remove root.a.a2
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.getRemoveQueueInfo().add("root.a.a2");
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
@@ -263,13 +263,13 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
stopQueue("root.c", "root.c.c1");
// Remove root.c (parent queue)
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.getRemoveQueueInfo().add("root.c");
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
@@ -288,7 +288,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
stopQueue("root.a", "root.a.a1", "root.a.a2");
// Remove root.a (parent queue) with capacity 25
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.getRemoveQueueInfo().add("root.a");
// Set root.b capacity to 100
@@ -298,9 +298,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
updateInfo.getUpdateQueueInfo().add(b);
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
@@ -320,7 +320,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
stopQueue("root.b", "root.c", "root.c.c1");
// Remove root.b and root.c
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
updateInfo.getRemoveQueueInfo().add("root.b");
updateInfo.getRemoveQueueInfo().add("root.c");
Map<String, String> aCapacity = new HashMap<>();
@@ -330,9 +330,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
updateInfo.getUpdateQueueInfo().add(configInfo);
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
@@ -348,7 +348,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
ClientResponse response;
// Set state of queues to STOPPED.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
Map<String, String> stoppedParam = new HashMap<>();
stoppedParam.put(CapacitySchedulerConfiguration.STATE,
QueueState.STOPPED.toString());
@@ -358,9 +358,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
}
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -378,7 +378,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
ClientResponse response;
// Update config value.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
Map<String, String> updateParam = new HashMap<>();
updateParam.put(CapacitySchedulerConfiguration.MAXIMUM_AM_RESOURCE_SUFFIX,
"0.2");
@@ -393,9 +393,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
0.001f);
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -411,9 +411,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
updateInfo.getUpdateQueueInfo().add(aUpdateInfo);
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -431,7 +431,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
ClientResponse response;
// Update root.a and root.b capacity to 50.
- QueueConfigsUpdateInfo updateInfo = new QueueConfigsUpdateInfo();
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
Map<String, String> updateParam = new HashMap<>();
updateParam.put(CapacitySchedulerConfiguration.CAPACITY, "50");
QueueConfigInfo aUpdateInfo = new QueueConfigInfo("root.a", updateParam);
@@ -441,9 +441,9 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
response =
r.path("ws").path("v1").path("cluster")
- .path("queues").queryParam("user.name", userName)
+ .path("sched-conf").queryParam("user.name", userName)
.accept(MediaType.APPLICATION_JSON)
- .entity(toJson(updateInfo, QueueConfigsUpdateInfo.class),
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
MediaType.APPLICATION_JSON)
.put(ClientResponse.class);
assertEquals(Status.OK.getStatusCode(), response.getStatus());
@@ -453,6 +453,47 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
assertEquals(50.0f, newCSConf.getNonLabeledQueueCapacity("root.b"), 0.01f);
}
+ @Test
+ public void testGlobalConfChange() throws Exception {
+ WebResource r = resource();
+
+ ClientResponse response;
+
+ // Set maximum-applications to 30000.
+ SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
+ updateInfo.getGlobalParams().put(CapacitySchedulerConfiguration.PREFIX +
+ "maximum-applications", "30000");
+
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("sched-conf").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ CapacitySchedulerConfiguration newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(30000, newCSConf.getMaximumSystemApplications());
+
+ updateInfo.getGlobalParams().put(CapacitySchedulerConfiguration.PREFIX +
+ "maximum-applications", null);
+ // Unset maximum-applications. Should be set to default.
+ response =
+ r.path("ws").path("v1").path("cluster")
+ .path("sched-conf").queryParam("user.name", userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(updateInfo, SchedConfUpdateInfo.class),
+ MediaType.APPLICATION_JSON)
+ .put(ClientResponse.class);
+ assertEquals(Status.OK.getStatusCode(), response.getStatus());
+ newCSConf =
+ ((CapacityScheduler) rm.getResourceScheduler()).getConfiguration();
+ assertEquals(CapacitySchedulerConfiguration
+ .DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS,
+ newCSConf.getMaximumSystemApplications());
+ }
+
@Override
@After
public void tearDown() throws Exception {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[04/20] hadoop git commit: HADOOP-14644. Increase max heap size of
Maven javadoc plugin. Contributed by Andras Bokor.
Posted by xg...@apache.org.
HADOOP-14644. Increase max heap size of Maven javadoc plugin. Contributed by Andras Bokor.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2be9412b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2be9412b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2be9412b
Branch: refs/heads/YARN-5734
Commit: 2be9412b73ae4308c5cee0186520fc2ad6d54e43
Parents: a7d8586
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 31 15:09:34 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 31 15:09:34 2017 -0700
----------------------------------------------------------------------
hadoop-project-dist/pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be9412b/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 6e73c0e..9da5e53 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -102,7 +102,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
- <maxmemory>512m</maxmemory>
+ <maxmemory>768m</maxmemory>
<quiet>true</quiet>
<verbose>false</verbose>
<source>${maven.compile.source}</source>
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[10/20] hadoop git commit: MAPREDUCE-6921.
TestUmbilicalProtocolWithJobToken#testJobTokenRpc fails.
Posted by xg...@apache.org.
MAPREDUCE-6921. TestUmbilicalProtocolWithJobToken#testJobTokenRpc fails.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ceacadc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ceacadc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ceacadc5
Branch: refs/heads/YARN-5734
Commit: ceacadc51e58bb94ad3f3669488515a61e886d88
Parents: a4aa1cb
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Aug 1 14:56:42 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Aug 1 14:56:42 2017 +0900
----------------------------------------------------------------------
.../TestUmbilicalProtocolWithJobToken.java | 22 +++++++++-----------
1 file changed, 10 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ceacadc5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
index d1004b6..5d53663 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
@@ -29,12 +29,10 @@ import static org.mockito.Mockito.doReturn;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
-import org.apache.commons.logging.*;
-import org.apache.commons.logging.impl.Log4JLogger;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
@@ -49,9 +47,10 @@ import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-
-import org.apache.log4j.Level;
+import org.slf4j.Logger;
+import org.slf4j.event.Level;
import org.junit.Test;
+import static org.slf4j.LoggerFactory.getLogger;
/** Unit tests for using Job Token over RPC.
*
@@ -62,8 +61,7 @@ import org.junit.Test;
public class TestUmbilicalProtocolWithJobToken {
private static final String ADDRESS = "0.0.0.0";
- public static final Log LOG = LogFactory
- .getLog(TestUmbilicalProtocolWithJobToken.class);
+ public static final Logger LOG = getLogger(TestUmbilicalProtocolWithJobToken.class);
private static Configuration conf;
static {
@@ -73,11 +71,11 @@ public class TestUmbilicalProtocolWithJobToken {
}
static {
- ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
- ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
- ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
- ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
- ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
+ GenericTestUtils.setLogLevel(Client.LOG, Level.TRACE);
+ GenericTestUtils.setLogLevel(Server.LOG, Level.TRACE);
+ GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.TRACE);
+ GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.TRACE);
+ GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.TRACE);
}
@Test
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[06/20] hadoop git commit: YARN-6873. Moving logging APIs over to
slf4j in hadoop-yarn-server-applicationhistoryservice. Contributed by Yeliang
Cang.
Posted by xg...@apache.org.
YARN-6873. Moving logging APIs over to slf4j in hadoop-yarn-server-applicationhistoryservice. Contributed by Yeliang Cang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a78c0ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a78c0ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a78c0ff
Branch: refs/heads/YARN-5734
Commit: 1a78c0ff016097930edf68e8278f826b637e918c
Parents: ea56812
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Aug 1 10:53:32 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Aug 1 10:53:32 2017 +0900
----------------------------------------------------------------------
.../ApplicationHistoryClientService.java | 8 ++---
.../ApplicationHistoryManagerImpl.java | 8 ++---
...pplicationHistoryManagerOnTimelineStore.java | 8 ++---
.../ApplicationHistoryServer.java | 10 +++---
.../FileSystemApplicationHistoryStore.java | 22 ++++++------
.../webapp/AHSWebServices.java | 7 ++--
.../webapp/NavBlock.java | 6 ++--
.../timeline/KeyValueBasedTimelineStore.java | 8 ++---
.../server/timeline/LeveldbTimelineStore.java | 35 ++++++++++----------
.../yarn/server/timeline/RollingLevelDB.java | 15 +++++----
.../timeline/RollingLevelDBTimelineStore.java | 22 ++++++------
.../server/timeline/TimelineDataManager.java | 7 ++--
.../recovery/LeveldbTimelineStateStore.java | 30 ++++++++---------
.../timeline/security/TimelineACLsManager.java | 7 ++--
...lineDelegationTokenSecretManagerService.java | 8 ++---
.../timeline/webapp/TimelineWebServices.java | 7 ++--
.../TestFileSystemApplicationHistoryStore.java | 8 ++---
.../timeline/TestLeveldbTimelineStore.java | 2 +-
18 files changed, 111 insertions(+), 107 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
index 73d5d39..7d57048 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -22,8 +22,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -61,11 +59,13 @@ import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ApplicationHistoryClientService extends AbstractService implements
ApplicationHistoryProtocol {
- private static final Log LOG = LogFactory
- .getLog(ApplicationHistoryClientService.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ApplicationHistoryClientService.class);
private ApplicationHistoryManager history;
private Server server;
private InetSocketAddress bindAddress;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
index 130bb32..b8931d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
@@ -23,8 +23,6 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
@@ -42,11 +40,13 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.Container
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ApplicationHistoryManagerImpl extends AbstractService implements
ApplicationHistoryManager {
- private static final Log LOG = LogFactory
- .getLog(ApplicationHistoryManagerImpl.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ApplicationHistoryManagerImpl.class);
private static final String UNAVAILABLE = "N/A";
private ApplicationHistoryStore historyStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 5404338..9240ed8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -28,8 +28,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -69,12 +67,14 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
implements
ApplicationHistoryManager {
- private static final Log LOG = LogFactory
- .getLog(ApplicationHistoryManagerOnTimelineStore.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(ApplicationHistoryManagerOnTimelineStore.class);
@VisibleForTesting
static final String UNAVAILABLE = "N/A";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 6e6e98b..85e5f2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -22,8 +22,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
@@ -60,6 +58,8 @@ import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.webapp.WebAppContext;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* History server that keeps track of all types of history in the cluster.
@@ -68,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
public class ApplicationHistoryServer extends CompositeService {
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
- private static final Log LOG = LogFactory
- .getLog(ApplicationHistoryServer.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(ApplicationHistoryServer.class);
private ApplicationHistoryClientService ahsClientService;
private ApplicationACLsManager aclsManager;
@@ -178,7 +178,7 @@ public class ApplicationHistoryServer extends CompositeService {
appHistoryServer.init(conf);
appHistoryServer.start();
} catch (Throwable t) {
- LOG.fatal("Error starting ApplicationHistoryServer", t);
+ LOG.error("Error starting ApplicationHistoryServer", t);
ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
}
return appHistoryServer;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
index be7bc6d..fa2da44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
@@ -30,8 +30,6 @@ import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@@ -74,6 +72,8 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.C
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.protobuf.InvalidProtocolBufferException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* File system implementation of {@link ApplicationHistoryStore}. In this
@@ -89,8 +89,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
public class FileSystemApplicationHistoryStore extends AbstractService
implements ApplicationHistoryStore {
- private static final Log LOG = LogFactory
- .getLog(FileSystemApplicationHistoryStore.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(FileSystemApplicationHistoryStore.class);
private static final String ROOT_DIR_NAME = "ApplicationHistoryDataRoot";
private static final int MIN_BLOCK_SIZE = 256 * 1024;
@@ -141,7 +141,7 @@ public class FileSystemApplicationHistoryStore extends AbstractService
}
outstandingWriters.clear();
} finally {
- IOUtils.cleanup(LOG, fs);
+ IOUtils.cleanupWithLogger(LOG, fs);
}
super.serviceStop();
}
@@ -711,12 +711,12 @@ public class FileSystemApplicationHistoryStore extends AbstractService
}
public void reset() throws IOException {
- IOUtils.cleanup(LOG, scanner);
+ IOUtils.cleanupWithLogger(LOG, scanner);
scanner = reader.createScanner();
}
public void close() {
- IOUtils.cleanup(LOG, scanner, reader, fsdis);
+ IOUtils.cleanupWithLogger(LOG, scanner, reader, fsdis);
}
}
@@ -740,13 +740,13 @@ public class FileSystemApplicationHistoryStore extends AbstractService
YarnConfiguration.DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE), null,
getConfig());
} catch (IOException e) {
- IOUtils.cleanup(LOG, fsdos);
+ IOUtils.cleanupWithLogger(LOG, fsdos);
throw e;
}
}
public synchronized void close() {
- IOUtils.cleanup(LOG, writer, fsdos);
+ IOUtils.cleanupWithLogger(LOG, writer, fsdos);
}
public synchronized void writeHistoryData(HistoryDataKey key, byte[] value)
@@ -756,13 +756,13 @@ public class FileSystemApplicationHistoryStore extends AbstractService
dos = writer.prepareAppendKey(-1);
key.write(dos);
} finally {
- IOUtils.cleanup(LOG, dos);
+ IOUtils.cleanupWithLogger(LOG, dos);
}
try {
dos = writer.prepareAppendValue(value.length);
dos.write(value);
} finally {
- IOUtils.cleanup(LOG, dos);
+ IOUtils.cleanupWithLogger(LOG, dos);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 6195199..13410a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -42,8 +42,6 @@ import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -80,12 +78,15 @@ import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.sun.jersey.api.client.ClientHandlerException;
import com.sun.jersey.api.client.UniformInterfaceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@Singleton
@Path("/ws/v1/applicationhistory")
public class AHSWebServices extends WebServices {
- private static final Log LOG = LogFactory.getLog(AHSWebServices.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(AHSWebServices.class);
private static final String NM_DOWNLOAD_URI_STR =
"/ws/v1/node/containers";
private static final Joiner JOINER = Joiner.on("");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index 3ee4dd1..a260634 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -18,20 +18,20 @@
package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class NavBlock extends HtmlBlock {
@Override
public void render(Block html) {
boolean addErrorsAndWarningsLink = false;
- Log log = LogFactory.getLog(NavBlock.class);
+ Logger log = LoggerFactory.getLogger(NavBlock.class);
if (log instanceof Log4JLogger) {
Log4jWarningErrorMetricsAppender appender =
Log4jWarningErrorMetricsAppender.findAppender();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
index 79e2bf2..82db770 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.yarn.server.timeline;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.service.AbstractService;
@@ -33,6 +31,8 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEnt
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
@@ -71,8 +71,8 @@ abstract class KeyValueBasedTimelineStore
private boolean serviceStopped = false;
- private static final Log LOG
- = LogFactory.getLog(KeyValueBasedTimelineStore.class);
+ private static final Logger LOG
+ = LoggerFactory.getLogger(KeyValueBasedTimelineStore.class);
public KeyValueBasedTimelineStore() {
super(KeyValueBasedTimelineStore.class.getName());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index ffe0413..e3db1dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -22,8 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
@@ -48,6 +46,7 @@ import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.*;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@@ -118,8 +117,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
@InterfaceStability.Unstable
public class LeveldbTimelineStore extends AbstractService
implements TimelineStore {
- private static final Log LOG = LogFactory
- .getLog(LeveldbTimelineStore.class);
+ private static final org.slf4j.Logger LOG = LoggerFactory
+ .getLogger(LeveldbTimelineStore.class);
@Private
@VisibleForTesting
@@ -240,7 +239,7 @@ public class LeveldbTimelineStore extends AbstractService
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
- IOUtils.cleanup(LOG, localFS);
+ IOUtils.cleanupWithLogger(LOG, localFS);
}
LOG.info("Using leveldb path " + dbPath);
try {
@@ -284,7 +283,7 @@ public class LeveldbTimelineStore extends AbstractService
" closing db now", e);
}
}
- IOUtils.cleanup(LOG, db);
+ IOUtils.cleanupWithLogger(LOG, db);
super.serviceStop();
}
@@ -320,7 +319,7 @@ public class LeveldbTimelineStore extends AbstractService
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
- LOG.error(e);
+ LOG.error(e.toString());
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
@@ -394,7 +393,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
}
@@ -570,7 +569,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
return events;
}
@@ -753,7 +752,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
}
@@ -925,7 +924,7 @@ public class LeveldbTimelineStore extends AbstractService
} finally {
lock.unlock();
writeLocks.returnLock(lock);
- IOUtils.cleanup(LOG, writeBatch);
+ IOUtils.cleanupWithLogger(LOG, writeBatch);
}
for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) {
@@ -1376,7 +1375,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
}
@@ -1506,7 +1505,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, writeBatch);
+ IOUtils.cleanupWithLogger(LOG, writeBatch);
}
}
@@ -1548,7 +1547,7 @@ public class LeveldbTimelineStore extends AbstractService
LOG.error("Got IOException while deleting entities for type " +
entityType + ", continuing to next type", e);
} finally {
- IOUtils.cleanup(LOG, iterator, pfIterator);
+ IOUtils.cleanupWithLogger(LOG, iterator, pfIterator);
deleteLock.writeLock().unlock();
if (typeCount > 0) {
LOG.info("Deleted " + typeCount + " entities of type " +
@@ -1629,7 +1628,7 @@ public class LeveldbTimelineStore extends AbstractService
String incompatibleMessage =
"Incompatible version for timeline store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
- LOG.fatal(incompatibleMessage);
+ LOG.error(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
@@ -1718,7 +1717,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, writeBatch);
+ IOUtils.cleanupWithLogger(LOG, writeBatch);
}
}
@@ -1755,7 +1754,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
}
@@ -1805,7 +1804,7 @@ public class LeveldbTimelineStore extends AbstractService
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
index 6d10671..5c511a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java
@@ -33,8 +33,6 @@ import java.util.Map.Entry;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang.time.FastDateFormat;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -45,6 +43,8 @@ import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Contains the logic to lookup a leveldb by timestamp so that multiple smaller
@@ -54,7 +54,8 @@ import org.iq80.leveldb.WriteBatch;
class RollingLevelDB {
/** Logger for this class. */
- private static final Log LOG = LogFactory.getLog(RollingLevelDB.class);
+ private static final Logger LOG = LoggerFactory.
+ getLogger(RollingLevelDB.class);
/** Factory to open and create new leveldb instances. */
private static JniDBFactory factory = new JniDBFactory();
/** Thread safe date formatter. */
@@ -151,7 +152,7 @@ class RollingLevelDB {
}
public void close() {
- IOUtils.cleanup(LOG, writeBatch);
+ IOUtils.cleanupWithLogger(LOG, writeBatch);
}
}
@@ -346,7 +347,7 @@ class RollingLevelDB {
.iterator();
while (iterator.hasNext()) {
Entry<Long, DB> entry = iterator.next();
- IOUtils.cleanup(LOG, entry.getValue());
+ IOUtils.cleanupWithLogger(LOG, entry.getValue());
String dbName = fdf.format(entry.getKey());
Path path = new Path(rollingDBPath, getName() + "." + dbName);
try {
@@ -361,9 +362,9 @@ class RollingLevelDB {
public void stop() throws Exception {
for (DB db : rollingdbs.values()) {
- IOUtils.cleanup(LOG, db);
+ IOUtils.cleanupWithLogger(LOG, db);
}
- IOUtils.cleanup(LOG, lfs);
+ IOUtils.cleanupWithLogger(LOG, lfs);
}
private long computeNextCheckMillis(long now) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 00f6630..1ac170c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -38,8 +38,6 @@ import java.util.TreeMap;
import org.apache.commons.collections.map.LRUMap;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
@@ -76,6 +74,8 @@ import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.WriteBatch;
import org.nustaq.serialization.FSTConfiguration;
import org.nustaq.serialization.FSTClazzNameRegistry;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static java.nio.charset.StandardCharsets.UTF_8;
@@ -168,8 +168,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
@InterfaceStability.Unstable
public class RollingLevelDBTimelineStore extends AbstractService implements
TimelineStore {
- private static final Log LOG = LogFactory
- .getLog(RollingLevelDBTimelineStore.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(RollingLevelDBTimelineStore.class);
private static FSTConfiguration fstConf =
FSTConfiguration.createDefaultConfiguration();
// Fall back to 2.24 parsing if 2.50 parsing fails
@@ -368,9 +368,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
+ " closing db now", e);
}
}
- IOUtils.cleanup(LOG, domaindb);
- IOUtils.cleanup(LOG, starttimedb);
- IOUtils.cleanup(LOG, ownerdb);
+ IOUtils.cleanupWithLogger(LOG, domaindb);
+ IOUtils.cleanupWithLogger(LOG, starttimedb);
+ IOUtils.cleanupWithLogger(LOG, ownerdb);
entitydb.stop();
indexdb.stop();
super.serviceStop();
@@ -399,7 +399,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
discardOldEntities(timestamp);
Thread.sleep(ttlInterval);
} catch (IOException e) {
- LOG.error(e);
+ LOG.error(e.toString());
} catch (InterruptedException e) {
LOG.info("Deletion thread received interrupt, exiting");
break;
@@ -1525,7 +1525,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
+ ". Total start times deleted so far this cycle: "
+ startTimesCount);
}
- IOUtils.cleanup(LOG, writeBatch);
+ IOUtils.cleanupWithLogger(LOG, writeBatch);
writeBatch = starttimedb.createWriteBatch();
batchSize = 0;
}
@@ -1545,7 +1545,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
LOG.info("Deleted " + startTimesCount + "/" + totalCount
+ " start time entities earlier than " + minStartTime);
} finally {
- IOUtils.cleanup(LOG, writeBatch);
+ IOUtils.cleanupWithLogger(LOG, writeBatch);
}
return startTimesCount;
}
@@ -1622,7 +1622,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
String incompatibleMessage = "Incompatible version for timeline store: "
+ "expecting version " + getCurrentVersion()
+ ", but loading version " + loadedVersion;
- LOG.fatal(incompatibleMessage);
+ LOG.error(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
index 57a9346..56b71fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
@@ -26,8 +26,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
@@ -45,6 +43,8 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The class wrap over the timeline store and the ACLs manager. It does some non
@@ -54,7 +54,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class TimelineDataManager extends AbstractService {
- private static final Log LOG = LogFactory.getLog(TimelineDataManager.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TimelineDataManager.class);
@VisibleForTesting
public static final String DEFAULT_DOMAIN_ID = "DEFAULT";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
index b62a541..bcd57ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java
@@ -28,8 +28,6 @@ import java.io.File;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -50,6 +48,8 @@ import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
@@ -60,8 +60,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
public class LeveldbTimelineStateStore extends
TimelineStateStore {
- public static final Log LOG =
- LogFactory.getLog(LeveldbTimelineStateStore.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(LeveldbTimelineStateStore.class);
private static final String DB_NAME = "timeline-state-store.ldb";
private static final FsPermission LEVELDB_DIR_UMASK = FsPermission
@@ -103,7 +103,7 @@ public class LeveldbTimelineStateStore extends
localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
}
} finally {
- IOUtils.cleanup(LOG, localFS);
+ IOUtils.cleanupWithLogger(LOG, localFS);
}
JniDBFactory factory = new JniDBFactory();
try {
@@ -131,7 +131,7 @@ public class LeveldbTimelineStateStore extends
@Override
protected void closeStorage() throws IOException {
- IOUtils.cleanup(LOG, db);
+ IOUtils.cleanupWithLogger(LOG, db);
}
@Override
@@ -168,8 +168,8 @@ public class LeveldbTimelineStateStore extends
} catch (DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, ds);
- IOUtils.cleanup(LOG, batch);
+ IOUtils.cleanupWithLogger(LOG, ds);
+ IOUtils.cleanupWithLogger(LOG, batch);
}
}
@@ -239,7 +239,7 @@ public class LeveldbTimelineStateStore extends
key.write(dataStream);
dataStream.close();
} finally {
- IOUtils.cleanup(LOG, dataStream);
+ IOUtils.cleanupWithLogger(LOG, dataStream);
}
return memStream.toByteArray();
}
@@ -253,7 +253,7 @@ public class LeveldbTimelineStateStore extends
try {
key.readFields(in);
} finally {
- IOUtils.cleanup(LOG, in);
+ IOUtils.cleanupWithLogger(LOG, in);
}
state.tokenMasterKeyState.add(key);
}
@@ -267,7 +267,7 @@ public class LeveldbTimelineStateStore extends
try {
data.readFields(in);
} finally {
- IOUtils.cleanup(LOG, in);
+ IOUtils.cleanupWithLogger(LOG, in);
}
state.tokenState.put(data.getTokenIdentifier(), data.getRenewDate());
}
@@ -290,7 +290,7 @@ public class LeveldbTimelineStateStore extends
++numKeys;
}
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
return numKeys;
}
@@ -314,7 +314,7 @@ public class LeveldbTimelineStateStore extends
} catch (DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(LOG, iterator);
+ IOUtils.cleanupWithLogger(LOG, iterator);
}
return numTokens;
}
@@ -332,7 +332,7 @@ public class LeveldbTimelineStateStore extends
try {
state.latestSequenceNumber = in.readInt();
} finally {
- IOUtils.cleanup(LOG, in);
+ IOUtils.cleanupWithLogger(LOG, in);
}
}
}
@@ -412,7 +412,7 @@ public class LeveldbTimelineStateStore extends
String incompatibleMessage =
"Incompatible version for timeline state store: expecting version "
+ getCurrentVersion() + ", but loading version " + loadedVersion;
- LOG.fatal(incompatibleMessage);
+ LOG.error(incompatibleMessage);
throw new IOException(incompatibleMessage);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
index 25252fc..6c32eec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
@@ -24,8 +24,6 @@ import java.util.HashMap;
import java.util.Map;
import org.apache.commons.collections.map.LRUMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
@@ -41,6 +39,8 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.util.StringHelper;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <code>TimelineACLsManager</code> check the entity level timeline data access.
@@ -48,7 +48,8 @@ import com.google.common.annotations.VisibleForTesting;
@Private
public class TimelineACLsManager {
- private static final Log LOG = LogFactory.getLog(TimelineACLsManager.class);
+ private static final Logger LOG = LoggerFactory.
+ getLogger(TimelineACLsManager.class);
private static final int DOMAIN_ACCESS_ENTRY_CACHE_SIZE = 100;
private AdminACLsManager adminAclsManager;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
index 60a0348..0c6892a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.timeline.security;
import java.io.IOException;
import java.util.Map.Entry;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@@ -35,6 +33,8 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The service wrapper of {@link TimelineDelegationTokenSecretManager}
@@ -118,8 +118,8 @@ public class TimelineDelegationTokenSecretManagerService extends
public static class TimelineDelegationTokenSecretManager extends
AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
- public static final Log LOG =
- LogFactory.getLog(TimelineDelegationTokenSecretManager.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TimelineDelegationTokenSecretManager.class);
private TimelineStateStore stateStore;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index ad4e2bb..be8e3c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -43,8 +43,6 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.JettyUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
@@ -68,13 +66,16 @@ import org.apache.hadoop.yarn.webapp.NotFoundException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@Singleton
@Path("/ws/v1/timeline")
//TODO: support XML serialization/deserialization
public class TimelineWebServices {
- private static final Log LOG = LogFactory.getLog(TimelineWebServices.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TimelineWebServices.class);
private TimelineDataManager timelineDataManager;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index 15a00d2..df4adbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -32,8 +32,6 @@ import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -51,12 +49,14 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestFileSystemApplicationHistoryStore extends
ApplicationHistoryStoreTestUtils {
- private static Log LOG = LogFactory
- .getLog(TestFileSystemApplicationHistoryStore.class.getName());
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TestFileSystemApplicationHistoryStore.class.getName());
private FileSystem fs;
private Path fsWorkingPath;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a78c0ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
index 0c292d8..f68a1c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java
@@ -160,7 +160,7 @@ public class TestLeveldbTimelineStore extends TimelineStoreTestUtils {
} catch(DBException e) {
throw new IOException(e);
} finally {
- IOUtils.cleanup(null, iterator, pfIterator);
+ IOUtils.cleanupWithLogger(null, iterator, pfIterator);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org